author		Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-17 00:20:36 +0200
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-17 00:20:36 +0200
commit		1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree		0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/asm-ia64
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'include/asm-ia64')
152 files changed, 22805 insertions, 0 deletions
diff --git a/include/asm-ia64/a.out.h b/include/asm-ia64/a.out.h
new file mode 100644
index 000000000000..7293ac1df3ab
--- /dev/null
+++ b/include/asm-ia64/a.out.h
@@ -0,0 +1,35 @@
+#ifndef _ASM_IA64_A_OUT_H
+#define _ASM_IA64_A_OUT_H
+
+/*
+ * No a.out format has been (or should be) defined so this file is
+ * just a dummy that allows us to get binfmt_elf compiled.  It
+ * probably would be better to clean up binfmt_elf.c so it does not
+ * necessarily depend on there being a.out support.
+ *
+ * Modified 1998-2002
+ *	David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co.
+ */
+
+#include <linux/types.h>
+
+struct exec {
+	unsigned long a_info;
+	unsigned long a_text;
+	unsigned long a_data;
+	unsigned long a_bss;
+	unsigned long a_entry;
+};
+
+#define N_TXTADDR(x) 0
+#define N_DATADDR(x) 0
+#define N_BSSADDR(x) 0
+#define N_DRSIZE(x) 0
+#define N_TRSIZE(x) 0
+#define N_SYMSIZE(x) 0
+#define N_TXTOFF(x) 0
+
+#ifdef __KERNEL__
+#include <asm/ustack.h>
+#endif
+#endif /* _ASM_IA64_A_OUT_H */
diff --git a/include/asm-ia64/acpi-ext.h b/include/asm-ia64/acpi-ext.h
new file mode 100644
index 000000000000..9271d74c64cc
--- /dev/null
+++ b/include/asm-ia64/acpi-ext.h
@@ -0,0 +1,17 @@
+/*
+ * ia64/platform/hp/common/hp_acpi.h
+ *
+ * Copyright (C) 2003 Hewlett-Packard
+ * Copyright (C) Alex Williamson
+ * Copyright (C) Bjorn Helgaas
+ *
+ * Vendor specific extensions to ACPI.
+ */
+#ifndef _ASM_IA64_ACPI_EXT_H
+#define _ASM_IA64_ACPI_EXT_H
+
+#include <linux/types.h>
+
+extern acpi_status hp_acpi_csr_space (acpi_handle, u64 *base, u64 *length);
+
+#endif /* _ASM_IA64_ACPI_EXT_H */
diff --git a/include/asm-ia64/acpi.h b/include/asm-ia64/acpi.h
new file mode 100644
index 000000000000..6a26a977f253
--- /dev/null
+++ b/include/asm-ia64/acpi.h
@@ -0,0 +1,112 @@
+/*
+ * asm-ia64/acpi.h
+ *
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
+ * Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
+ * Copyright (C) 2001,2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#ifndef _ASM_ACPI_H
+#define _ASM_ACPI_H
+
+#ifdef __KERNEL__
+
+#include <linux/init.h>
+#include <linux/numa.h>
+#include <asm/system.h>
+
+#define COMPILER_DEPENDENT_INT64	long
+#define COMPILER_DEPENDENT_UINT64	unsigned long
+
+/*
+ * Calling conventions:
+ *
+ * ACPI_SYSTEM_XFACE        - Interfaces to host OS (handlers, threads)
+ * ACPI_EXTERNAL_XFACE      - External ACPI interfaces
+ * ACPI_INTERNAL_XFACE      - Internal ACPI interfaces
+ * ACPI_INTERNAL_VAR_XFACE  - Internal variable-parameter list interfaces
+ */
+#define ACPI_SYSTEM_XFACE
+#define ACPI_EXTERNAL_XFACE
+#define ACPI_INTERNAL_XFACE
+#define ACPI_INTERNAL_VAR_XFACE
+
+/* Asm macros */
+
+#define ACPI_ASM_MACROS
+#define BREAKPOINT3
+#define ACPI_DISABLE_IRQS() local_irq_disable()
+#define ACPI_ENABLE_IRQS()  local_irq_enable()
+#define ACPI_FLUSH_CPU_CACHE()
+
+static inline int
+ia64_acpi_acquire_global_lock (unsigned int *lock)
+{
+	unsigned int old, new, val;
+	do {
+		old = *lock;
+		new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
+		val = ia64_cmpxchg4_acq(lock, new, old);
+	} while (unlikely (val != old));
+	return (new < 3) ? -1 : 0;
+}
+
+static inline int
+ia64_acpi_release_global_lock (unsigned int *lock)
+{
+	unsigned int old, new, val;
+	do {
+		old = *lock;
+		new = old & ~0x3;
+		val = ia64_cmpxchg4_acq(lock, new, old);
+	} while (unlikely (val != old));
+	return old & 0x1;
+}
+
+#define ACPI_ACQUIRE_GLOBAL_LOCK(GLptr, Acq)				\
+	((Acq) = ia64_acpi_acquire_global_lock((unsigned int *) GLptr))
+
+#define ACPI_RELEASE_GLOBAL_LOCK(GLptr, Acq)				\
+	((Acq) = ia64_acpi_release_global_lock((unsigned int *) GLptr))
+
+#define acpi_disabled 0		/* ACPI always enabled on IA64 */
+#define acpi_noirq 0		/* ACPI always enabled on IA64 */
+#define acpi_pci_disabled 0	/* ACPI PCI always enabled on IA64 */
+#define acpi_strict 1		/* no ACPI spec workarounds on IA64 */
+static inline void disable_acpi(void) { }
+
+const char *acpi_get_sysname (void);
+int acpi_request_vector (u32 int_type);
+int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
+
+#ifdef CONFIG_ACPI_NUMA
+/* Proximity bitmap length; _PXM is at most 255 (8 bit)*/
+#define MAX_PXM_DOMAINS (256)
+extern int __devinitdata pxm_to_nid_map[MAX_PXM_DOMAINS];
+extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
+#endif
+
+extern u16 ia64_acpiid_to_sapicid[];
+
+#endif /*__KERNEL__*/
+
+#endif /*_ASM_ACPI_H*/
diff --git a/include/asm-ia64/agp.h b/include/asm-ia64/agp.h
new file mode 100644
index 000000000000..d1316f1e6ee1
--- /dev/null
+++ b/include/asm-ia64/agp.h
@@ -0,0 +1,21 @@
+#ifndef _ASM_IA64_AGP_H
+#define _ASM_IA64_AGP_H
+
+/*
+ * IA-64 specific AGP definitions.
+ *
+ * Copyright (C) 2002-2003 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+/*
+ * To avoid memory-attribute aliasing issues, we require that the AGPGART engine operate
+ * in coherent mode, which lets us map the AGP memory as normal (write-back) memory
+ * (unlike x86, where it gets mapped "write-coalescing").
+ */
+#define map_page_into_agp(page)		/* nothing */
+#define unmap_page_from_agp(page)	/* nothing */
+#define flush_agp_mappings()		/* nothing */
+#define flush_agp_cache()		mb()
+
+#endif /* _ASM_IA64_AGP_H */
diff --git a/include/asm-ia64/asmmacro.h b/include/asm-ia64/asmmacro.h
new file mode 100644
index 000000000000..77af457f4ad7
--- /dev/null
+++ b/include/asm-ia64/asmmacro.h
@@ -0,0 +1,111 @@
+#ifndef _ASM_IA64_ASMMACRO_H
+#define _ASM_IA64_ASMMACRO_H
+
+/*
+ * Copyright (C) 2000-2001, 2003-2004 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <linux/config.h>
+
+#define ENTRY(name)	\
+	.align 32;	\
+	.proc name;	\
+name:
+
+#define ENTRY_MIN_ALIGN(name)	\
+	.align 16;	\
+	.proc name;	\
+name:
+
+#define GLOBAL_ENTRY(name)	\
+	.global name;	\
+	ENTRY(name)
+
+#define END(name)	\
+	.endp name
+
+/*
+ * Helper macros to make unwind directives more readable:
+ */
+
+/* prologue_gr: */
+#define ASM_UNW_PRLG_RP		0x8
+#define ASM_UNW_PRLG_PFS	0x4
+#define ASM_UNW_PRLG_PSP	0x2
+#define ASM_UNW_PRLG_PR		0x1
+#define ASM_UNW_PRLG_GRSAVE(ninputs)	(32+(ninputs))
+
+/*
+ * Helper macros for accessing user memory.
+ */
+
+	.section "__ex_table", "a"	// declare section & section attributes
+	.previous
+
+# define EX(y,x...)	\
+	.xdata4 "__ex_table", 99f-., y-.;	\
+  [99:]	x
+# define EXCLR(y,x...)	\
+	.xdata4 "__ex_table", 99f-., y-.+4;	\
+  [99:]	x
+
+/*
+ * Mark instructions that need a load of a virtual address patched to be
+ * a load of a physical address.  We use this either in critical performance
+ * path (ivt.S - TLB miss processing) or in places where it might not be
+ * safe to use a "tpa" instruction (mca_asm.S - error recovery).
+ */
+	.section ".data.patch.vtop", "a"	// declare section & section attributes
+	.previous
+
+#define LOAD_PHYSICAL(pr, reg, obj)	\
+[1:](pr)movl reg = obj;	\
+	.xdata4 ".data.patch.vtop", 1b-.
+
+/*
+ * For now, we always put in the McKinley E9 workaround.  On CPUs that don't need it,
+ * we'll patch out the work-around bundles with NOPs, so their impact is minimal.
+ */
+#define DO_MCKINLEY_E9_WORKAROUND
+
+#ifdef DO_MCKINLEY_E9_WORKAROUND
+	.section ".data.patch.mckinley_e9", "a"
+	.previous
+/* workaround for Itanium 2 Errata 9: */
+# define FSYS_RETURN	\
+	.xdata4 ".data.patch.mckinley_e9", 1f-.;	\
+1:{ .mib;	\
+	nop.m 0;	\
+	mov r16=ar.pfs;	\
+	br.call.sptk.many b7=2f;;	\
+  };	\
+2:{ .mib;	\
+	nop.m 0;	\
+	mov ar.pfs=r16;	\
+	br.ret.sptk.many b6;;	\
+  }
+#else
+# define FSYS_RETURN	br.ret.sptk.many b6
+#endif
+
+/*
+ * Up until early 2004, use of .align within a function caused bad unwind info.
+ * TEXT_ALIGN(n) expands into ".align n" if a fixed GAS is available or into nothing
+ * otherwise.
+ */
+#ifdef HAVE_WORKING_TEXT_ALIGN
+# define TEXT_ALIGN(n)	.align n
+#else
+# define TEXT_ALIGN(n)
+#endif
+
+#ifdef HAVE_SERIALIZE_DIRECTIVE
+# define dv_serialize_data		.serialize.data
+# define dv_serialize_instruction	.serialize.instruction
+#else
+# define dv_serialize_data
+# define dv_serialize_instruction
+#endif
+
+#endif /* _ASM_IA64_ASMMACRO_H */
diff --git a/include/asm-ia64/atomic.h b/include/asm-ia64/atomic.h
new file mode 100644
index 000000000000..874a6f890e75
--- /dev/null
+++ b/include/asm-ia64/atomic.h
@@ -0,0 +1,183 @@
+#ifndef _ASM_IA64_ATOMIC_H
+#define _ASM_IA64_ATOMIC_H
+
+/*
+ * Atomic operations that C can't guarantee us.  Useful for
+ * resource counting etc..
+ *
+ * NOTE: don't mess with the types below!
The "unsigned long" and + * "int" types were carefully placed so as to ensure proper operation + * of the macros. + * + * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co + * David Mosberger-Tang <davidm@hpl.hp.com> + */ +#include <linux/types.h> + +#include <asm/intrinsics.h> + +/* + * On IA-64, counter must always be volatile to ensure that that the + * memory accesses are ordered. + */ +typedef struct { volatile __s32 counter; } atomic_t; +typedef struct { volatile __s64 counter; } atomic64_t; + +#define ATOMIC_INIT(i) ((atomic_t) { (i) }) +#define ATOMIC64_INIT(i) ((atomic64_t) { (i) }) + +#define atomic_read(v) ((v)->counter) +#define atomic64_read(v) ((v)->counter) + +#define atomic_set(v,i) (((v)->counter) = (i)) +#define atomic64_set(v,i) (((v)->counter) = (i)) + +static __inline__ int +ia64_atomic_add (int i, atomic_t *v) +{ + __s32 old, new; + CMPXCHG_BUGCHECK_DECL + + do { + CMPXCHG_BUGCHECK(v); + old = atomic_read(v); + new = old + i; + } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); + return new; +} + +static __inline__ int +ia64_atomic64_add (__s64 i, atomic64_t *v) +{ + __s64 old, new; + CMPXCHG_BUGCHECK_DECL + + do { + CMPXCHG_BUGCHECK(v); + old = atomic_read(v); + new = old + i; + } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); + return new; +} + +static __inline__ int +ia64_atomic_sub (int i, atomic_t *v) +{ + __s32 old, new; + CMPXCHG_BUGCHECK_DECL + + do { + CMPXCHG_BUGCHECK(v); + old = atomic_read(v); + new = old - i; + } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); + return new; +} + +static __inline__ int +ia64_atomic64_sub (__s64 i, atomic64_t *v) +{ + __s64 old, new; + CMPXCHG_BUGCHECK_DECL + + do { + CMPXCHG_BUGCHECK(v); + old = atomic_read(v); + new = old - i; + } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); + return new; +} + +#define atomic_add_return(i,v) \ +({ \ + int __ia64_aar_i = (i); \ + (__builtin_constant_p(i) \ + && ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \ + || (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \ + || (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \ + || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \ + ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \ + : ia64_atomic_add(__ia64_aar_i, v); \ +}) + +#define atomic64_add_return(i,v) \ +({ \ + long __ia64_aar_i = (i); \ + (__builtin_constant_p(i) \ + && ( (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \ + || (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \ + || (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \ + || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \ + ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \ + : ia64_atomic64_add(__ia64_aar_i, v); \ +}) + +/* + * Atomically add I to V and return TRUE if the resulting value is + * negative. + */ +static __inline__ int +atomic_add_negative (int i, atomic_t *v) +{ + return atomic_add_return(i, v) < 0; +} + +static __inline__ int +atomic64_add_negative (__s64 i, atomic64_t *v) +{ + return atomic64_add_return(i, v) < 0; +} + +#define atomic_sub_return(i,v) \ +({ \ + int __ia64_asr_i = (i); \ + (__builtin_constant_p(i) \ + && ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \ + || (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \ + || (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \ + || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \ + ? 
+		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
+		: ia64_atomic_sub(__ia64_asr_i, v);	\
+})
+
+#define atomic64_sub_return(i,v)	\
+({	\
+	long __ia64_asr_i = (i);	\
+	(__builtin_constant_p(i)	\
+	 && ( (__ia64_asr_i == 1) || (__ia64_asr_i == 4)	\
+	   || (__ia64_asr_i == 8) || (__ia64_asr_i == 16)	\
+	   || (__ia64_asr_i == -1) || (__ia64_asr_i == -4)	\
+	   || (__ia64_asr_i == -8) || (__ia64_asr_i == -16)))	\
+		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
+		: ia64_atomic64_sub(__ia64_asr_i, v);	\
+})
+
+#define atomic_dec_return(v)		atomic_sub_return(1, (v))
+#define atomic_inc_return(v)		atomic_add_return(1, (v))
+#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
+#define atomic64_inc_return(v)		atomic64_add_return(1, (v))
+
+#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
+#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
+#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
+#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i), (v)) == 0)
+#define atomic64_dec_and_test(v)	(atomic64_sub_return(1, (v)) == 0)
+#define atomic64_inc_and_test(v)	(atomic64_add_return(1, (v)) == 0)
+
+#define atomic_add(i,v)			atomic_add_return((i), (v))
+#define atomic_sub(i,v)			atomic_sub_return((i), (v))
+#define atomic_inc(v)			atomic_add(1, (v))
+#define atomic_dec(v)			atomic_sub(1, (v))
+
+#define atomic64_add(i,v)		atomic64_add_return((i), (v))
+#define atomic64_sub(i,v)		atomic64_sub_return((i), (v))
+#define atomic64_inc(v)			atomic64_add(1, (v))
+#define atomic64_dec(v)			atomic64_sub(1, (v))
+
+/* Atomic operations are already serializing */
+#define smp_mb__before_atomic_dec()	barrier()
+#define smp_mb__after_atomic_dec()	barrier()
+#define smp_mb__before_atomic_inc()	barrier()
+#define smp_mb__after_atomic_inc()	barrier()
+
+#endif /* _ASM_IA64_ATOMIC_H */
diff --git a/include/asm-ia64/bitops.h b/include/asm-ia64/bitops.h
new file mode 100644
index 000000000000..925d54cee475
--- /dev/null
+++ b/include/asm-ia64/bitops.h
@@ -0,0 +1,410 @@
+#ifndef _ASM_IA64_BITOPS_H
+#define _ASM_IA64_BITOPS_H
+
+/*
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ *
+ * 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64 O(1)
+ *	    scheduler patch
+ */
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <asm/bitops.h>
+#include <asm/intrinsics.h>
+
+/**
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This function is atomic and may not be reordered.  See __set_bit()
+ * if you do not require the atomic guarantees.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ *
+ * The address must be (at least) "long" aligned.
+ * Note that there are drivers (e.g., eepro100) which use these operations to operate on
+ * hw-defined data-structures, so we can't easily change these operations to force a
+ * bigger alignment.
+ *
+ * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
+ */
+static __inline__ void
+set_bit (int nr, volatile void *addr)
+{
+	__u32 bit, old, new;
+	volatile __u32 *m;
+	CMPXCHG_BUGCHECK_DECL
+
+	m = (volatile __u32 *) addr + (nr >> 5);
+	bit = 1 << (nr & 31);
+	do {
+		CMPXCHG_BUGCHECK(m);
+		old = *m;
+		new = old | bit;
+	} while (cmpxchg_acq(m, old, new) != old);
+}
+
+/**
+ * __set_bit - Set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Unlike set_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static __inline__ void
+__set_bit (int nr, volatile void *addr)
+{
+	*((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
+}
+
+/*
+ * clear_bit() has "acquire" semantics.
+ */
+#define smp_mb__before_clear_bit()	smp_mb()
+#define smp_mb__after_clear_bit()	do { /* skip */; } while (0)
+
+/**
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic and may not be reordered.  However, it does
+ * not contain a memory barrier, so if it is used for locking purposes,
+ * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * in order to ensure changes are visible on other processors.
+ */
+static __inline__ void
+clear_bit (int nr, volatile void *addr)
+{
+	__u32 mask, old, new;
+	volatile __u32 *m;
+	CMPXCHG_BUGCHECK_DECL
+
+	m = (volatile __u32 *) addr + (nr >> 5);
+	mask = ~(1 << (nr & 31));
+	do {
+		CMPXCHG_BUGCHECK(m);
+		old = *m;
+		new = old & mask;
+	} while (cmpxchg_acq(m, old, new) != old);
+}
+
+/**
+ * __clear_bit - Clears a bit in memory (non-atomic version)
+ */
+static __inline__ void
+__clear_bit (int nr, volatile void *addr)
+{
+	volatile __u32 *p = (__u32 *) addr + (nr >> 5);
+	__u32 m = 1 << (nr & 31);
+	*p &= ~m;
+}
+
+/**
+ * change_bit - Toggle a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * change_bit() is atomic and may not be reordered.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static __inline__ void
+change_bit (int nr, volatile void *addr)
+{
+	__u32 bit, old, new;
+	volatile __u32 *m;
+	CMPXCHG_BUGCHECK_DECL
+
+	m = (volatile __u32 *) addr + (nr >> 5);
+	bit = (1 << (nr & 31));
+	do {
+		CMPXCHG_BUGCHECK(m);
+		old = *m;
+		new = old ^ bit;
+	} while (cmpxchg_acq(m, old, new) != old);
+}
+
+/**
+ * __change_bit - Toggle a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Unlike change_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static __inline__ void
+__change_bit (int nr, volatile void *addr)
+{
+	*((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31));
+}
+
+/**
+ * test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static __inline__ int
+test_and_set_bit (int nr, volatile void *addr)
+{
+	__u32 bit, old, new;
+	volatile __u32 *m;
+	CMPXCHG_BUGCHECK_DECL
+
+	m = (volatile __u32 *) addr + (nr >> 5);
+	bit = 1 << (nr & 31);
+	do {
+		CMPXCHG_BUGCHECK(m);
+		old = *m;
+		new = old | bit;
+	} while (cmpxchg_acq(m, old, new) != old);
+	return (old & bit) != 0;
+}
+
+/**
+ * __test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two examples of this operation race, one can appear to succeed
+ * but actually fail.  You must protect multiple accesses with a lock.
+ */
+static __inline__ int
+__test_and_set_bit (int nr, volatile void *addr)
+{
+	__u32 *p = (__u32 *) addr + (nr >> 5);
+	__u32 m = 1 << (nr & 31);
+	int oldbitset = (*p & m) != 0;
+
+	*p |= m;
+	return oldbitset;
+}
+
+/**
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static __inline__ int
+test_and_clear_bit (int nr, volatile void *addr)
+{
+	__u32 mask, old, new;
+	volatile __u32 *m;
+	CMPXCHG_BUGCHECK_DECL
+
+	m = (volatile __u32 *) addr + (nr >> 5);
+	mask = ~(1 << (nr & 31));
+	do {
+		CMPXCHG_BUGCHECK(m);
+		old = *m;
+		new = old & mask;
+	} while (cmpxchg_acq(m, old, new) != old);
+	return (old & ~mask) != 0;
+}
+
+/**
+ * __test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two examples of this operation race, one can appear to succeed
+ * but actually fail.  You must protect multiple accesses with a lock.
+ */
+static __inline__ int
+__test_and_clear_bit(int nr, volatile void * addr)
+{
+	__u32 *p = (__u32 *) addr + (nr >> 5);
+	__u32 m = 1 << (nr & 31);
+	int oldbitset = *p & m;
+
+	*p &= ~m;
+	return oldbitset;
+}
+
+/**
+ * test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static __inline__ int
+test_and_change_bit (int nr, volatile void *addr)
+{
+	__u32 bit, old, new;
+	volatile __u32 *m;
+	CMPXCHG_BUGCHECK_DECL
+
+	m = (volatile __u32 *) addr + (nr >> 5);
+	bit = (1 << (nr & 31));
+	do {
+		CMPXCHG_BUGCHECK(m);
+		old = *m;
+		new = old ^ bit;
+	} while (cmpxchg_acq(m, old, new) != old);
+	return (old & bit) != 0;
+}
+
+/*
+ * WARNING: non atomic version.
+ */
+static __inline__ int
+__test_and_change_bit (int nr, void *addr)
+{
+	__u32 old, bit = (1 << (nr & 31));
+	__u32 *m = (__u32 *) addr + (nr >> 5);
+
+	old = *m;
+	*m = old ^ bit;
+	return (old & bit) != 0;
+}
+
+static __inline__ int
+test_bit (int nr, const volatile void *addr)
+{
+	return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
+}
+
+/**
+ * ffz - find the first zero bit in a long word
+ * @x: The long word to find the bit in
+ *
+ * Returns the bit-number (0..63) of the first (least significant) zero bit.  Undefined if
+ * no zero exists, so code should check against ~0UL first...
+ */
+static inline unsigned long
+ffz (unsigned long x)
+{
+	unsigned long result;
+
+	result = ia64_popcnt(x & (~x - 1));
+	return result;
+}
+
+/**
+ * __ffs - find first bit in word.
+ * @x: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static __inline__ unsigned long
+__ffs (unsigned long x)
+{
+	unsigned long result;
+
+	result = ia64_popcnt((x-1) & ~x);
+	return result;
+}
+
+#ifdef __KERNEL__
+
+/*
+ * ia64_fls - find the last set bit in a 64 bit quantity
+ * @x: The value to search
+ */
+static inline unsigned long
+ia64_fls (unsigned long x)
+{
+	long double d = x;
+	long exp;
+
+	exp = ia64_getf_exp(d);
+	return exp - 0xffff;
+}
+
+static inline int
+fls (int x)
+{
+	return ia64_fls((unsigned int) x);
+}
+
+/*
+ * ffs: find first bit set.  This is defined the same way as the libc and compiler builtin
+ * ffs routines, therefore differs in spirit from the above ffz (man ffs): it operates on
+ * "int" values only and the result value is the bit number + 1.  ffs(0) is defined to
+ * return zero.
+ */
+#define ffs(x)	__builtin_ffs(x)
+
+/*
+ * hweightN: returns the hamming weight (i.e. the number
+ * of bits set) of a N-bit word
+ */
+static __inline__ unsigned long
+hweight64 (unsigned long x)
+{
+	unsigned long result;
+	result = ia64_popcnt(x);
+	return result;
+}
+
+#define hweight32(x) (unsigned int) hweight64((x) & 0xfffffffful)
+#define hweight16(x) (unsigned int) hweight64((x) & 0xfffful)
+#define hweight8(x)  (unsigned int) hweight64((x) & 0xfful)
+
+#endif /* __KERNEL__ */
+
+extern int __find_next_zero_bit (const void *addr, unsigned long size,
+			unsigned long offset);
+extern int __find_next_bit(const void *addr, unsigned long size,
+			unsigned long offset);
+
+#define find_next_zero_bit(addr, size, offset) \
+			__find_next_zero_bit((addr), (size), (offset))
+#define find_next_bit(addr, size, offset) \
+			__find_next_bit((addr), (size), (offset))
+
+/*
+ * The optimizer actually does good code for this case..
+ */
+#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
+
+#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
+
+#ifdef __KERNEL__
+
+#define __clear_bit(nr, addr)	clear_bit(nr, addr)
+
+#define ext2_set_bit			test_and_set_bit
+#define ext2_set_bit_atomic(l,n,a)	test_and_set_bit(n,a)
+#define ext2_clear_bit			test_and_clear_bit
+#define ext2_clear_bit_atomic(l,n,a)	test_and_clear_bit(n,a)
+#define ext2_test_bit			test_bit
+#define ext2_find_first_zero_bit	find_first_zero_bit
+#define ext2_find_next_zero_bit		find_next_zero_bit
+
+/* Bitmap functions for the minix filesystem.  */
+#define minix_test_and_set_bit(nr,addr)		test_and_set_bit(nr,addr)
+#define minix_set_bit(nr,addr)			set_bit(nr,addr)
+#define minix_test_and_clear_bit(nr,addr)	test_and_clear_bit(nr,addr)
+#define minix_test_bit(nr,addr)			test_bit(nr,addr)
+#define minix_find_first_zero_bit(addr,size)	find_first_zero_bit(addr,size)
+
+static inline int
+sched_find_first_bit (unsigned long *b)
+{
+	if (unlikely(b[0]))
+		return __ffs(b[0]);
+	if (unlikely(b[1]))
+		return 64 + __ffs(b[1]);
+	return __ffs(b[2]) + 128;
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_IA64_BITOPS_H */
diff --git a/include/asm-ia64/break.h b/include/asm-ia64/break.h
new file mode 100644
index 000000000000..97c7b2d79600
--- /dev/null
+++ b/include/asm-ia64/break.h
@@ -0,0 +1,21 @@
+#ifndef _ASM_IA64_BREAK_H
+#define _ASM_IA64_BREAK_H
+
+/*
+ * IA-64 Linux break numbers.
+ *
+ * Copyright (C) 1999 Hewlett-Packard Co
+ * Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+/*
+ * OS-specific debug break numbers:
+ */
+#define __IA64_BREAK_KDB	0x80100
+
+/*
+ * OS-specific break numbers:
+ */
+#define __IA64_BREAK_SYSCALL	0x100000
+
+#endif /* _ASM_IA64_BREAK_H */
diff --git a/include/asm-ia64/bug.h b/include/asm-ia64/bug.h
new file mode 100644
index 000000000000..2c0cd51e8856
--- /dev/null
+++ b/include/asm-ia64/bug.h
@@ -0,0 +1,15 @@
+#ifndef _ASM_IA64_BUG_H
+#define _ASM_IA64_BUG_H
+
+#if (__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1)
+# define ia64_abort()	__builtin_trap()
+#else
+# define ia64_abort()	(*(volatile int *) 0 = 0)
+#endif
+#define BUG() do { printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); ia64_abort(); } while (0)
+
+/* should this BUG be made generic? */
+#define HAVE_ARCH_BUG
+#include <asm-generic/bug.h>
+
+#endif
diff --git a/include/asm-ia64/bugs.h b/include/asm-ia64/bugs.h
new file mode 100644
index 000000000000..433523e3b2ed
--- /dev/null
+++ b/include/asm-ia64/bugs.h
@@ -0,0 +1,19 @@
+/*
+ * This is included by init/main.c to check for architecture-dependent bugs.
+ *
+ * Needs:
+ *	void check_bugs(void);
+ *
+ * Based on <asm-alpha/bugs.h>.
+ *
+ * Modified 1998, 1999, 2003
+ *	David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co.
+ */
+#ifndef _ASM_IA64_BUGS_H
+#define _ASM_IA64_BUGS_H
+
+#include <asm/processor.h>
+
+extern void check_bugs (void);
+
+#endif /* _ASM_IA64_BUGS_H */
diff --git a/include/asm-ia64/byteorder.h b/include/asm-ia64/byteorder.h
new file mode 100644
index 000000000000..69bd41d7c26e
--- /dev/null
+++ b/include/asm-ia64/byteorder.h
@@ -0,0 +1,42 @@
+#ifndef _ASM_IA64_BYTEORDER_H
+#define _ASM_IA64_BYTEORDER_H
+
+/*
+ * Modified 1998, 1999
+ *	David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co.
+ */
+
+#include <asm/types.h>
+#include <asm/intrinsics.h>
+#include <linux/compiler.h>
+
+static __inline__ __attribute_const__ __u64
+__ia64_swab64 (__u64 x)
+{
+	__u64 result;
+
+	result = ia64_mux1(x, ia64_mux1_rev);
+	return result;
+}
+
+static __inline__ __attribute_const__ __u32
+__ia64_swab32 (__u32 x)
+{
+	return __ia64_swab64(x) >> 32;
+}
+
+static __inline__ __attribute_const__ __u16
+__ia64_swab16(__u16 x)
+{
+	return __ia64_swab64(x) >> 48;
+}
+
+#define __arch__swab64(x) __ia64_swab64(x)
+#define __arch__swab32(x) __ia64_swab32(x)
+#define __arch__swab16(x) __ia64_swab16(x)
+
+#define __BYTEORDER_HAS_U64__
+
+#include <linux/byteorder/little_endian.h>
+
+#endif /* _ASM_IA64_BYTEORDER_H */
diff --git a/include/asm-ia64/cache.h b/include/asm-ia64/cache.h
new file mode 100644
index 000000000000..666d8f175cb3
--- /dev/null
+++ b/include/asm-ia64/cache.h
@@ -0,0 +1,30 @@
+#ifndef _ASM_IA64_CACHE_H
+#define _ASM_IA64_CACHE_H
+
+#include <linux/config.h>
+
+/*
+ * Copyright (C) 1998-2000 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+/* Bytes per L1 (data) cache line.  */
+#define L1_CACHE_SHIFT		CONFIG_IA64_L1_CACHE_SHIFT
+#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
+
+#define L1_CACHE_SHIFT_MAX 7	/* largest L1 which this arch supports */
+
+#ifdef CONFIG_SMP
+# define SMP_CACHE_SHIFT	L1_CACHE_SHIFT
+# define SMP_CACHE_BYTES	L1_CACHE_BYTES
+#else
+  /*
+   * The "aligned" directive can only _increase_ alignment, so this is
+   * safe and provides an easy way to avoid wasting space on a
+   * uni-processor:
+   */
+# define SMP_CACHE_SHIFT	3
+# define SMP_CACHE_BYTES	(1 << 3)
+#endif
+
+#endif /* _ASM_IA64_CACHE_H */
diff --git a/include/asm-ia64/cacheflush.h b/include/asm-ia64/cacheflush.h
new file mode 100644
index 000000000000..f2dacb4245ec
--- /dev/null
+++ b/include/asm-ia64/cacheflush.h
@@ -0,0 +1,50 @@
+#ifndef _ASM_IA64_CACHEFLUSH_H
+#define _ASM_IA64_CACHEFLUSH_H
+
+/*
+ * Copyright (C) 2002 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <linux/page-flags.h>
+
+#include <asm/bitops.h>
+#include <asm/page.h>
+
+/*
+ * Cache flushing routines.  This is the kind of stuff that can be very expensive, so try
+ * to avoid them whenever possible.
+ */
+
+#define flush_cache_all()			do { } while (0)
+#define flush_cache_mm(mm)			do { } while (0)
+#define flush_cache_range(vma, start, end)	do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
+#define flush_icache_page(vma,page)		do { } while (0)
+#define flush_cache_vmap(start, end)		do { } while (0)
+#define flush_cache_vunmap(start, end)		do { } while (0)
+
+#define flush_dcache_page(page)			\
+do {						\
+	clear_bit(PG_arch_1, &(page)->flags);	\
+} while (0)
+
+#define flush_dcache_mmap_lock(mapping)		do { } while (0)
+#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
+
+extern void flush_icache_range (unsigned long start, unsigned long end);
+
+#define flush_icache_user_range(vma, page, user_addr, len)					\
+do {												\
+	unsigned long _addr = (unsigned long) page_address(page) + ((user_addr) & ~PAGE_MASK);	\
+	flush_icache_range(_addr, _addr + (len));						\
+} while (0)
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+do { memcpy(dst, src, len); \
+     flush_icache_user_range(vma, page, vaddr, len); \
+} while (0)
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+	memcpy(dst, src, len)
+
+#endif /* _ASM_IA64_CACHEFLUSH_H */
diff --git a/include/asm-ia64/checksum.h b/include/asm-ia64/checksum.h
new file mode 100644
index 000000000000..1f230ff8ea81
--- /dev/null
+++ b/include/asm-ia64/checksum.h
@@ -0,0 +1,76 @@
+#ifndef _ASM_IA64_CHECKSUM_H
+#define _ASM_IA64_CHECKSUM_H
+
+/*
+ * Modified 1998, 1999
+ *	David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ */
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.
+ */
+extern unsigned short ip_fast_csum (unsigned char * iph, unsigned int ihl);
+
+/*
+ * Computes the checksum of the TCP/UDP pseudo-header returns a 16-bit
+ * checksum, already complemented
+ */
+extern unsigned short int csum_tcpudp_magic (unsigned long saddr,
+					     unsigned long daddr,
+					     unsigned short len,
+					     unsigned short proto,
+					     unsigned int sum);
+
+extern unsigned int csum_tcpudp_nofold (unsigned long saddr,
+					unsigned long daddr,
+					unsigned short len,
+					unsigned short proto,
+					unsigned int sum);
+
+/*
+ * Computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+extern unsigned int csum_partial (const unsigned char * buff, int len,
+				  unsigned int sum);
+
+/*
+ * Same as csum_partial, but copies from src while it checksums.
+ *
+ * Here it is even more important to align src and dst on a 32-bit (or
+ * even better 64-bit) boundary.
+ */
+extern unsigned int csum_partial_copy_from_user (const char *src, char *dst,
+						 int len, unsigned int sum,
+						 int *errp);
+
+extern unsigned int csum_partial_copy_nocheck (const char *src, char *dst,
+					       int len, unsigned int sum);
+
+/*
+ * This routine is used for miscellaneous IP-like checksums, mainly in
+ * icmp.c
+ */
+extern unsigned short ip_compute_csum (unsigned char *buff, int len);
+
+/*
+ * Fold a partial checksum without adding pseudo headers.
+ */
+static inline unsigned short
+csum_fold (unsigned int sum)
+{
+	sum = (sum & 0xffff) + (sum >> 16);
+	sum = (sum & 0xffff) + (sum >> 16);
+	return ~sum;
+}
+
+#endif /* _ASM_IA64_CHECKSUM_H */
diff --git a/include/asm-ia64/compat.h b/include/asm-ia64/compat.h
new file mode 100644
index 000000000000..cc0ff0a4bdd0
--- /dev/null
+++ b/include/asm-ia64/compat.h
@@ -0,0 +1,198 @@
+#ifndef _ASM_IA64_COMPAT_H
+#define _ASM_IA64_COMPAT_H
+/*
+ * Architecture specific compatibility types
+ */
+#include <linux/types.h>
+
+#define COMPAT_USER_HZ	100
+
+typedef u32		compat_size_t;
+typedef s32		compat_ssize_t;
+typedef s32		compat_time_t;
+typedef s32		compat_clock_t;
+typedef s32		compat_key_t;
+typedef s32		compat_pid_t;
+typedef u16		compat_uid_t;
+typedef u16		compat_gid_t;
+typedef u32		compat_uid32_t;
+typedef u32		compat_gid32_t;
+typedef u16		compat_mode_t;
+typedef u32		compat_ino_t;
+typedef u16		compat_dev_t;
+typedef s32		compat_off_t;
+typedef s64		compat_loff_t;
+typedef u16		compat_nlink_t;
+typedef u16		compat_ipc_pid_t;
+typedef s32		compat_daddr_t;
+typedef u32		compat_caddr_t;
+typedef __kernel_fsid_t	compat_fsid_t;
+
+typedef s32		compat_int_t;
+typedef s32		compat_long_t;
+typedef u32		compat_uint_t;
+typedef u32		compat_ulong_t;
+
+struct compat_timespec {
+	compat_time_t	tv_sec;
+	s32		tv_nsec;
+};
+
+struct compat_timeval {
+	compat_time_t	tv_sec;
+	s32		tv_usec;
+};
+
+struct compat_stat {
+	compat_dev_t	st_dev;
+	u16		__pad1;
+	compat_ino_t	st_ino;
+	compat_mode_t	st_mode;
+	compat_nlink_t	st_nlink;
+	compat_uid_t	st_uid;
+	compat_gid_t	st_gid;
+	compat_dev_t	st_rdev;
+	u16		__pad2;
+	u32		st_size;
+	u32		st_blksize;
+	u32		st_blocks;
+	u32		st_atime;
+	u32		st_atime_nsec;
+	u32		st_mtime;
+	u32		st_mtime_nsec;
+	u32		st_ctime;
+	u32		st_ctime_nsec;
+	u32		__unused4;
+	u32		__unused5;
+};
+
+struct compat_flock {
+	short		l_type;
+	short		l_whence;
+	compat_off_t	l_start;
+	compat_off_t	l_len;
+	compat_pid_t	l_pid;
+};
+
+#define F_GETLK64	12
+#define F_SETLK64	13
+#define F_SETLKW64	14
+
+/*
+ * IA32 uses 4 byte alignment for 64 bit quantities,
+ * so we need to pack this structure.
+ */
+struct compat_flock64 {
+	short		l_type;
+	short		l_whence;
+	compat_loff_t	l_start;
+	compat_loff_t	l_len;
+	compat_pid_t	l_pid;
+} __attribute__((packed));
+
+struct compat_statfs {
+	int		f_type;
+	int		f_bsize;
+	int		f_blocks;
+	int		f_bfree;
+	int		f_bavail;
+	int		f_files;
+	int		f_ffree;
+	compat_fsid_t	f_fsid;
+	int		f_namelen;	/* SunOS ignores this field. */
+	int		f_frsize;
+	int		f_spare[5];
+};
+
+#define COMPAT_RLIM_OLD_INFINITY	0x7fffffff
+#define COMPAT_RLIM_INFINITY		0xffffffff
+
+typedef u32		compat_old_sigset_t;	/* at least 32 bits */
+
+#define _COMPAT_NSIG		64
+#define _COMPAT_NSIG_BPW	32
+
+typedef u32		compat_sigset_word;
+
+#define COMPAT_OFF_T_MAX	0x7fffffff
+#define COMPAT_LOFF_T_MAX	0x7fffffffffffffffL
+
+struct compat_ipc64_perm {
+	compat_key_t key;
+	compat_uid32_t uid;
+	compat_gid32_t gid;
+	compat_uid32_t cuid;
+	compat_gid32_t cgid;
+	unsigned short mode;
+	unsigned short __pad1;
+	unsigned short seq;
+	unsigned short __pad2;
+	compat_ulong_t unused1;
+	compat_ulong_t unused2;
+};
+
+struct compat_semid64_ds {
+	struct compat_ipc64_perm sem_perm;
+	compat_time_t  sem_otime;
+	compat_ulong_t __unused1;
+	compat_time_t  sem_ctime;
+	compat_ulong_t __unused2;
+	compat_ulong_t sem_nsems;
+	compat_ulong_t __unused3;
+	compat_ulong_t __unused4;
+};
+
+struct compat_msqid64_ds {
+	struct compat_ipc64_perm msg_perm;
+	compat_time_t  msg_stime;
+	compat_ulong_t __unused1;
+	compat_time_t  msg_rtime;
+	compat_ulong_t __unused2;
+	compat_time_t  msg_ctime;
+	compat_ulong_t __unused3;
+	compat_ulong_t msg_cbytes;
+	compat_ulong_t msg_qnum;
+	compat_ulong_t msg_qbytes;
+	compat_pid_t   msg_lspid;
+	compat_pid_t   msg_lrpid;
+	compat_ulong_t __unused4;
+	compat_ulong_t __unused5;
+};
+
+struct compat_shmid64_ds {
+	struct compat_ipc64_perm shm_perm;
+	compat_size_t  shm_segsz;
+	compat_time_t  shm_atime;
+	compat_ulong_t __unused1;
+	compat_time_t  shm_dtime;
+	compat_ulong_t __unused2;
+	compat_time_t  shm_ctime;
+	compat_ulong_t __unused3;
+	compat_pid_t   shm_cpid;
+	compat_pid_t   shm_lpid;
+	compat_ulong_t shm_nattch;
+	compat_ulong_t __unused4;
+	compat_ulong_t __unused5;
+};
+
+/*
+ * A pointer passed in from user mode.  This should not be used for syscall parameters,
+ * just declare them as pointers because the syscall entry code will have appropriately
+ * converted them already.
+ */
+typedef u32		compat_uptr_t;
+
+static inline void __user *
+compat_ptr (compat_uptr_t uptr)
+{
+	return (void __user *) (unsigned long) uptr;
+}
+
+static __inline__ void __user *
+compat_alloc_user_space (long len)
+{
+	struct pt_regs *regs = ia64_task_regs(current);
+	return (void __user *) (((regs->r12 & 0xffffffff) & -16) - len);
+}
+
+#endif /* _ASM_IA64_COMPAT_H */
diff --git a/include/asm-ia64/cpu.h b/include/asm-ia64/cpu.h
new file mode 100644
index 000000000000..e87fa3210a2b
--- /dev/null
+++ b/include/asm-ia64/cpu.h
@@ -0,0 +1,22 @@
+#ifndef _ASM_IA64_CPU_H_
+#define _ASM_IA64_CPU_H_
+
+#include <linux/device.h>
+#include <linux/cpu.h>
+#include <linux/topology.h>
+#include <linux/percpu.h>
+
+struct ia64_cpu {
+	struct cpu cpu;
+};
+
+DECLARE_PER_CPU(struct ia64_cpu, cpu_devices);
+
+DECLARE_PER_CPU(int, cpu_state);
+
+extern int arch_register_cpu(int num);
+#ifdef CONFIG_HOTPLUG_CPU
+extern void arch_unregister_cpu(int);
+#endif
+
+#endif /* _ASM_IA64_CPU_H_ */
diff --git a/include/asm-ia64/cputime.h b/include/asm-ia64/cputime.h
new file mode 100644
index 000000000000..72400a78002a
--- /dev/null
+++ b/include/asm-ia64/cputime.h
@@ -0,0 +1,6 @@
+#ifndef __IA64_CPUTIME_H
+#define __IA64_CPUTIME_H
+
+#include <asm-generic/cputime.h>
+
+#endif /* __IA64_CPUTIME_H */
diff --git a/include/asm-ia64/current.h b/include/asm-ia64/current.h
new file mode 100644
index 000000000000..c659f90fbfd9
--- /dev/null
+++ b/include/asm-ia64/current.h
@@ -0,0 +1,17 @@
+#ifndef _ASM_IA64_CURRENT_H
+#define _ASM_IA64_CURRENT_H
+
+/*
+ * Modified 1998-2000
+ *	David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
+ */
+
+#include <asm/intrinsics.h>
+
+/*
+ * In kernel mode, thread pointer (r13) is used to point to the current task
+ * structure.
+ */
+#define current	((struct task_struct *) ia64_getreg(_IA64_REG_TP))
+
+#endif /* _ASM_IA64_CURRENT_H */
diff --git a/include/asm-ia64/cyclone.h b/include/asm-ia64/cyclone.h
new file mode 100644
index 000000000000..88f6500e84ab
--- /dev/null
+++ b/include/asm-ia64/cyclone.h
@@ -0,0 +1,15 @@
+#ifndef ASM_IA64_CYCLONE_H
+#define ASM_IA64_CYCLONE_H
+
+#ifdef	CONFIG_IA64_CYCLONE
+extern int use_cyclone;
+extern void __init cyclone_setup(void);
+#else	/* CONFIG_IA64_CYCLONE */
+#define use_cyclone 0
+static inline void cyclone_setup(void)
+{
+	printk(KERN_ERR "Cyclone Counter: System not configured"
+					" w/ CONFIG_IA64_CYCLONE.\n");
+}
+#endif	/* CONFIG_IA64_CYCLONE */
+#endif	/* !ASM_IA64_CYCLONE_H */
diff --git a/include/asm-ia64/delay.h b/include/asm-ia64/delay.h
new file mode 100644
index 000000000000..57182d6f2b9a
--- /dev/null
+++ b/include/asm-ia64/delay.h
@@ -0,0 +1,97 @@
+#ifndef _ASM_IA64_DELAY_H
+#define _ASM_IA64_DELAY_H
+
+/*
+ * Delay routines using a pre-computed "cycles/usec" value.
+ *
+ * Copyright (C) 1998, 1999 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
+ * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
+ * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/compiler.h>
+
+#include <asm/intrinsics.h>
+#include <asm/processor.h>
+
+static __inline__ void
+ia64_set_itm (unsigned long val)
+{
+	ia64_setreg(_IA64_REG_CR_ITM, val);
+	ia64_srlz_d();
+}
+
+static __inline__ unsigned long
+ia64_get_itm (void)
+{
+	unsigned long result;
+
+	result = ia64_getreg(_IA64_REG_CR_ITM);
+	ia64_srlz_d();
+	return result;
+}
+
+static __inline__ void
+ia64_set_itv (unsigned long val)
+{
+	ia64_setreg(_IA64_REG_CR_ITV, val);
+	ia64_srlz_d();
+}
+
+static __inline__ unsigned long
+ia64_get_itv (void)
+{
+	return ia64_getreg(_IA64_REG_CR_ITV);
+}
+
+static __inline__ void
+ia64_set_itc (unsigned long val)
+{
+	ia64_setreg(_IA64_REG_AR_ITC, val);
+	ia64_srlz_d();
+}
+
+static __inline__ unsigned long
+ia64_get_itc (void)
+{
+	unsigned long result;
+
+	result = ia64_getreg(_IA64_REG_AR_ITC);
+	ia64_barrier();
+#ifdef CONFIG_ITANIUM
+	while (unlikely((__s32) result == -1)) {
+		result = ia64_getreg(_IA64_REG_AR_ITC);
+		ia64_barrier();
+	}
+#endif
+	return result;
+}
+
+extern void ia64_delay_loop (unsigned long loops);
+
+static __inline__ void
+__delay (unsigned long loops)
+{
+	if (unlikely(loops < 1))
+		return;
+
+	ia64_delay_loop (loops - 1);
+}
+
+static __inline__ void
+udelay (unsigned long usecs)
+{
+	unsigned long start = ia64_get_itc();
+	unsigned long cycles = usecs*local_cpu_data->cyc_per_usec;
+
+	while (ia64_get_itc() - start < cycles)
+		cpu_relax();
+}
+
+#endif /* _ASM_IA64_DELAY_H */
diff --git a/include/asm-ia64/div64.h b/include/asm-ia64/div64.h
new file mode 100644
index 000000000000..6cd978cefb28
--- /dev/null
+++ b/include/asm-ia64/div64.h
@@ -0,0 +1 @@
+#include <asm-generic/div64.h>
diff --git a/include/asm-ia64/dma-mapping.h b/include/asm-ia64/dma-mapping.h
new file mode 100644
index 000000000000..6347c9845642
--- /dev/null
+++ b/include/asm-ia64/dma-mapping.h
@@ -0,0 +1,70 @@
+#ifndef _ASM_IA64_DMA_MAPPING_H
+#define _ASM_IA64_DMA_MAPPING_H
+
+/*
+ * Copyright (C) 2003-2004 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+#include <linux/config.h>
+#include <asm/machvec.h>
+
+#define dma_alloc_coherent	platform_dma_alloc_coherent
+#define dma_alloc_noncoherent	platform_dma_alloc_coherent	/* coherent mem. is cheap */
+#define dma_free_coherent	platform_dma_free_coherent
+#define dma_free_noncoherent	platform_dma_free_coherent
+#define dma_map_single		platform_dma_map_single
+#define dma_map_sg		platform_dma_map_sg
+#define dma_unmap_single	platform_dma_unmap_single
+#define dma_unmap_sg		platform_dma_unmap_sg
+#define dma_sync_single_for_cpu	platform_dma_sync_single_for_cpu
+#define dma_sync_sg_for_cpu	platform_dma_sync_sg_for_cpu
+#define dma_sync_single_for_device platform_dma_sync_single_for_device
+#define dma_sync_sg_for_device	platform_dma_sync_sg_for_device
+#define dma_mapping_error	platform_dma_mapping_error
+
+#define dma_map_page(dev, pg, off, size, dir)				\
+	dma_map_single(dev, page_address(pg) + (off), (size), (dir))
+#define dma_unmap_page(dev, dma_addr, size, dir)			\
+	dma_unmap_single(dev, dma_addr, size, dir)
+
+/*
+ * Rest of this file is part of the "Advanced DMA API".  Use at your own risk.
+ * See Documentation/DMA-API.txt for details.
+ */
+
+#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir)	\
+	dma_sync_single_for_cpu(dev, dma_handle, size, dir)
+#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir)	\
+	dma_sync_single_for_device(dev, dma_handle, size, dir)
+
+#define dma_supported		platform_dma_supported
+
+static inline int
+dma_set_mask (struct device *dev, u64 mask)
+{
+	if (!dev->dma_mask || !dma_supported(dev, mask))
+		return -EIO;
+	*dev->dma_mask = mask;
+	return 0;
+}
+
+static inline int
+dma_get_cache_alignment (void)
+{
+	extern int ia64_max_cacheline_size;
+	return ia64_max_cacheline_size;
+}
+
+static inline void
+dma_cache_sync (void *vaddr, size_t size, enum dma_data_direction dir)
+{
+	/*
+	 * IA-64 is cache-coherent, so this is mostly a no-op.  However, we do need to
+	 * ensure that dma_cache_sync() enforces order, hence the mb().
+	 */
+	mb();
+}
+
+#define dma_is_consistent(dma_handle)	(1)	/* all we do is coherent memory... */
+
+#endif /* _ASM_IA64_DMA_MAPPING_H */
diff --git a/include/asm-ia64/dma.h b/include/asm-ia64/dma.h
new file mode 100644
index 000000000000..3be1b4925e18
--- /dev/null
+++ b/include/asm-ia64/dma.h
@@ -0,0 +1,23 @@
+#ifndef _ASM_IA64_DMA_H
+#define _ASM_IA64_DMA_H
+
+/*
+ * Copyright (C) 1998-2002 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <linux/config.h>
+
+#include <asm/io.h>		/* need byte IO */
+
+extern unsigned long MAX_DMA_ADDRESS;
+
+#ifdef CONFIG_PCI
+  extern int isa_dma_bridge_buggy;
+#else
+# define isa_dma_bridge_buggy	(0)
+#endif
+
+#define free_dma(x)
+
+#endif /* _ASM_IA64_DMA_H */
diff --git a/include/asm-ia64/elf.h b/include/asm-ia64/elf.h
new file mode 100644
index 000000000000..7d4ccc4b976e
--- /dev/null
+++ b/include/asm-ia64/elf.h
@@ -0,0 +1,259 @@
+#ifndef _ASM_IA64_ELF_H
+#define _ASM_IA64_ELF_H
+
+/*
+ * ELF-specific definitions.
+ *
+ * Copyright (C) 1998-1999, 2002-2004 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <linux/config.h>
+
+#include <asm/fpu.h>
+#include <asm/page.h>
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) ((x)->e_machine == EM_IA_64)
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS	ELFCLASS64
+#define ELF_DATA	ELFDATA2LSB
+#define ELF_ARCH	EM_IA_64
+
+#define USE_ELF_CORE_DUMP
+
+/* Least-significant four bits of ELF header's e_flags are OS-specific.  The bits are
+   interpreted as follows by Linux: */
+#define EF_IA_64_LINUX_EXECUTABLE_STACK	0x1	/* is stack (& heap) executable by default? */
+
+#define ELF_EXEC_PAGESIZE	PAGE_SIZE
+
+/*
+ * This is the location that an ET_DYN program is loaded if exec'ed.
+ * Typical use of this is to invoke "./ld.so someprog" to test out a
+ * new version of the loader.  We need to make sure that it is out of
+ * the way of the program that it will "exec", and that there is
+ * sufficient room for the brk.
+ */
+#define ELF_ET_DYN_BASE		(TASK_UNMAPPED_BASE + 0x800000000UL)
+
+#define PT_IA_64_UNWIND		0x70000001
+
+/* IA-64 relocations: */
+#define R_IA64_NONE		0x00	/* none */
+#define R_IA64_IMM14		0x21	/* symbol + addend, add imm14 */
+#define R_IA64_IMM22		0x22	/* symbol + addend, add imm22 */
+#define R_IA64_IMM64		0x23	/* symbol + addend, mov imm64 */
+#define R_IA64_DIR32MSB		0x24	/* symbol + addend, data4 MSB */
+#define R_IA64_DIR32LSB		0x25	/* symbol + addend, data4 LSB */
+#define R_IA64_DIR64MSB		0x26	/* symbol + addend, data8 MSB */
+#define R_IA64_DIR64LSB		0x27	/* symbol + addend, data8 LSB */
+#define R_IA64_GPREL22		0x2a	/* @gprel(sym+add), add imm22 */
+#define R_IA64_GPREL64I		0x2b	/* @gprel(sym+add), mov imm64 */
+#define R_IA64_GPREL32MSB	0x2c	/* @gprel(sym+add), data4 MSB */
+#define R_IA64_GPREL32LSB	0x2d	/* @gprel(sym+add), data4 LSB */
+#define R_IA64_GPREL64MSB	0x2e	/* @gprel(sym+add), data8 MSB */
+#define R_IA64_GPREL64LSB	0x2f	/* @gprel(sym+add), data8 LSB */
+#define R_IA64_LTOFF22		0x32	/* @ltoff(sym+add), add imm22 */
+#define R_IA64_LTOFF64I		0x33	/* @ltoff(sym+add), mov imm64 */
+#define R_IA64_PLTOFF22		0x3a	/* @pltoff(sym+add), add imm22 */
+#define R_IA64_PLTOFF64I	0x3b	/* @pltoff(sym+add), mov imm64 */
+#define R_IA64_PLTOFF64MSB	0x3e	/* @pltoff(sym+add), data8 MSB */
+#define R_IA64_PLTOFF64LSB	0x3f	/* @pltoff(sym+add), data8 LSB */
+#define R_IA64_FPTR64I		0x43	/* @fptr(sym+add), mov imm64 */
+#define R_IA64_FPTR32MSB	0x44	/* @fptr(sym+add), data4 MSB */
+#define R_IA64_FPTR32LSB	0x45	/* @fptr(sym+add), data4 LSB */
+#define R_IA64_FPTR64MSB	0x46	/* @fptr(sym+add), data8 MSB */
+#define R_IA64_FPTR64LSB	0x47	/* @fptr(sym+add), data8 LSB */
+#define R_IA64_PCREL60B		0x48	/* @pcrel(sym+add), brl */
+#define R_IA64_PCREL21B		0x49	/* @pcrel(sym+add), ptb, call */
+#define R_IA64_PCREL21M		0x4a	/* @pcrel(sym+add), chk.s */
+#define R_IA64_PCREL21F		0x4b	/* @pcrel(sym+add), fchkf */
+#define R_IA64_PCREL32MSB	0x4c	/* @pcrel(sym+add), data4 MSB */
+#define R_IA64_PCREL32LSB	0x4d	/* @pcrel(sym+add), data4 LSB */
+#define R_IA64_PCREL64MSB	0x4e	/* @pcrel(sym+add), data8 MSB */
+#define R_IA64_PCREL64LSB	0x4f	/* @pcrel(sym+add), data8 LSB */
+#define R_IA64_LTOFF_FPTR22	0x52	/* @ltoff(@fptr(s+a)), imm22 */
+#define R_IA64_LTOFF_FPTR64I	0x53	/* @ltoff(@fptr(s+a)), imm64 */
+#define R_IA64_LTOFF_FPTR32MSB	0x54	/* @ltoff(@fptr(s+a)), 4 MSB */
+#define R_IA64_LTOFF_FPTR32LSB	0x55	/* @ltoff(@fptr(s+a)), 4 LSB */
+#define R_IA64_LTOFF_FPTR64MSB	0x56	/* @ltoff(@fptr(s+a)), 8 MSB */
+#define R_IA64_LTOFF_FPTR64LSB	0x57	/* @ltoff(@fptr(s+a)), 8 LSB */
+#define R_IA64_SEGREL32MSB	0x5c	/* @segrel(sym+add), data4 MSB */
+#define R_IA64_SEGREL32LSB	0x5d	/* @segrel(sym+add), data4 LSB */
+#define R_IA64_SEGREL64MSB	0x5e	/* @segrel(sym+add), data8 MSB */
+#define R_IA64_SEGREL64LSB	0x5f	/* @segrel(sym+add), data8 LSB */
+#define R_IA64_SECREL32MSB	0x64	/* @secrel(sym+add), data4 MSB */
+#define R_IA64_SECREL32LSB	0x65	/* @secrel(sym+add), data4 LSB */
+#define R_IA64_SECREL64MSB	0x66	/* @secrel(sym+add), data8 MSB */
+#define R_IA64_SECREL64LSB	0x67	/* @secrel(sym+add), data8 LSB */
+#define R_IA64_REL32MSB		0x6c	/* data 4 + REL */
+#define R_IA64_REL32LSB		0x6d	/* data 4 + REL */
+#define R_IA64_REL64MSB		0x6e	/* data 8 + REL */
+#define R_IA64_REL64LSB		0x6f	/* data 8 + REL */
+#define R_IA64_LTV32MSB		0x74	/* symbol + addend, data4 MSB */
+#define R_IA64_LTV32LSB		0x75	/* symbol + addend, data4 LSB */
+#define R_IA64_LTV64MSB		0x76	/* symbol + addend, data8 MSB */
+#define R_IA64_LTV64LSB		0x77	/* symbol + addend, data8 LSB */
+#define R_IA64_PCREL21BI	0x79	/* @pcrel(sym+add), ptb, call */
+#define R_IA64_PCREL22		0x7a	/* @pcrel(sym+add), imm22 */
+#define R_IA64_PCREL64I		0x7b	/* @pcrel(sym+add), imm64 */
+#define R_IA64_IPLTMSB		0x80	/* dynamic reloc, imported PLT, MSB */
+#define R_IA64_IPLTLSB		0x81	/* dynamic reloc, imported PLT, LSB */
+#define R_IA64_COPY		0x84	/* dynamic reloc, data copy */
+#define R_IA64_SUB		0x85	/* -symbol + addend, add imm22 */
+#define R_IA64_LTOFF22X		0x86	/* LTOFF22, relaxable.  */
+#define R_IA64_LDXMOV		0x87	/* Use of LTOFF22X.  */
+#define R_IA64_TPREL14		0x91	/* @tprel(sym+add), add imm14 */
+#define R_IA64_TPREL22		0x92	/* @tprel(sym+add), add imm22 */
+#define R_IA64_TPREL64I		0x93	/* @tprel(sym+add), add imm64 */
+#define R_IA64_TPREL64MSB	0x96	/* @tprel(sym+add), data8 MSB */
+#define R_IA64_TPREL64LSB	0x97	/* @tprel(sym+add), data8 LSB */
+#define R_IA64_LTOFF_TPREL22	0x9a	/* @ltoff(@tprel(s+a)), add imm22 */
+#define R_IA64_DTPMOD64MSB	0xa6	/* @dtpmod(sym+add), data8 MSB */
+#define R_IA64_DTPMOD64LSB	0xa7	/* @dtpmod(sym+add), data8 LSB */
+#define R_IA64_LTOFF_DTPMOD22	0xaa	/* @ltoff(@dtpmod(s+a)), imm22 */
+#define R_IA64_DTPREL14		0xb1	/* @dtprel(sym+add), imm14 */
+#define R_IA64_DTPREL22		0xb2	/* @dtprel(sym+add), imm22 */
+#define R_IA64_DTPREL64I	0xb3	/* @dtprel(sym+add), imm64 */
+#define R_IA64_DTPREL32MSB	0xb4	/* @dtprel(sym+add), data4 MSB */
+#define R_IA64_DTPREL32LSB	0xb5	/* @dtprel(sym+add), data4 LSB */
+#define R_IA64_DTPREL64MSB	0xb6	/* @dtprel(sym+add), data8 MSB */
+#define R_IA64_DTPREL64LSB	0xb7	/* @dtprel(sym+add), data8 LSB */
+#define R_IA64_LTOFF_DTPREL22	0xba	/* @ltoff(@dtprel(s+a)), imm22 */
+
+/* IA-64 specific section flags: */
+#define SHF_IA_64_SHORT		0x10000000	/* section near gp */
+
+/*
+ * We use (abuse?) this macro to insert the (empty) vm_area that is
+ * used to map the register backing store.  I don't see any better
+ * place to do this, but we should discuss this with Linus once we can
+ * talk to him...
+ */
+extern void ia64_init_addr_space (void);
+#define ELF_PLAT_INIT(_r, load_addr)	ia64_init_addr_space()
+
+/* ELF register definitions.  This is needed for core dump support.  */
+
+/*
+ * elf_gregset_t contains the application-level state in the following order:
+ *	r0-r31
+ *	NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
+ *	predicate registers (p0-p63)
+ *	b0-b7
+ *	ip cfm psr
+ *	ar.rsc ar.bsp ar.bspstore ar.rnat
+ *	ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
+ */
+#define ELF_NGREG	128	/* we really need just 72 but let's leave some headroom... */
+#define ELF_NFPREG	128	/* f0 and f1 could be omitted, but so what... */
+
+typedef unsigned long elf_fpxregset_t;
+
+typedef unsigned long elf_greg_t;
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+typedef struct ia64_fpreg elf_fpreg_t;
+typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+
+
+
+struct pt_regs;	/* forward declaration... */
+extern void ia64_elf_core_copy_regs (struct pt_regs *src, elf_gregset_t dst);
+#define ELF_CORE_COPY_REGS(_dest,_regs)	ia64_elf_core_copy_regs(_regs, _dest);
+
+/* This macro yields a bitmask that programs can use to figure out
+   what instruction set this CPU supports. */
+#define ELF_HWCAP	0
+
+/* This macro yields a string that ld.so will use to load
+   implementation specific libraries for optimization.  Not terribly
+   relevant until we have real hardware to play with... */
+#define ELF_PLATFORM	NULL
+
+/*
+ * Architecture-neutral AT_ values are in the range 0-17.
+ * Leave some room for more of them, start the architecture-specific ones at 32.
+ */
+#define AT_SYSINFO	32
+#define AT_SYSINFO_EHDR	33
+
+#ifdef __KERNEL__
+#define SET_PERSONALITY(ex, ibcs2)	set_personality(PER_LINUX)
+#define elf_read_implies_exec(ex, executable_stack)					\
+	((executable_stack!=EXSTACK_DISABLE_X) && ((ex).e_flags & EF_IA_64_LINUX_EXECUTABLE_STACK) != 0)
+
+struct task_struct;
+
+extern int dump_task_regs(struct task_struct *, elf_gregset_t *);
+extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *);
+
+#define ELF_CORE_COPY_TASK_REGS(tsk, elf_gregs) dump_task_regs(tsk, elf_gregs)
+#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
+
+#define GATE_EHDR	((const struct elfhdr *) GATE_ADDR)
+
+#define ARCH_DLINFO								\
+do {										\
+	extern char __kernel_syscall_via_epc[];					\
+	NEW_AUX_ENT(AT_SYSINFO, (unsigned long) __kernel_syscall_via_epc);	\
+	NEW_AUX_ENT(AT_SYSINFO_EHDR, (unsigned long) GATE_EHDR);		\
+} while (0)
+
+
+/*
+ * These macros parameterize elf_core_dump in fs/binfmt_elf.c to write out
+ * extra segments containing the gate DSO contents.  Dumping its
+ * contents makes post-mortem fully interpretable later without matching up
+ * the same kernel and hardware config to see what PC values meant.
+ * Dumping its extra ELF program headers includes all the other information
+ * a debugger needs to easily find how the gate DSO was being used.
+ */
+#define ELF_CORE_EXTRA_PHDRS		(GATE_EHDR->e_phnum)
+#define ELF_CORE_WRITE_EXTRA_PHDRS						\
+do {										\
+	const struct elf_phdr *const gate_phdrs =				\
+		(const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff);	\
+	int i;									\
+	Elf64_Off ofs = 0;							\
+	for (i = 0; i < GATE_EHDR->e_phnum; ++i) {				\
+		struct elf_phdr phdr = gate_phdrs[i];				\
+		if (phdr.p_type == PT_LOAD) {					\
+			phdr.p_memsz = PAGE_ALIGN(phdr.p_memsz);		\
+			phdr.p_filesz = phdr.p_memsz;				\
+			if (ofs == 0) {						\
+				ofs = phdr.p_offset = offset;			\
+				offset += phdr.p_filesz;			\
+			}							\
+			else							\
+				phdr.p_offset = ofs;				\
+		}								\
+		else								\
+			phdr.p_offset += ofs;					\
+		phdr.p_paddr = 0; /* match other core phdrs */			\
+		DUMP_WRITE(&phdr, sizeof(phdr));				\
+	}									\
+} while (0)
+#define ELF_CORE_WRITE_EXTRA_DATA					\
+do {									\
+	const struct elf_phdr *const gate_phdrs =			\
+		(const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff); \
+	int i;								\
+	for (i = 0; i < GATE_EHDR->e_phnum; ++i) {			\
+		if (gate_phdrs[i].p_type == PT_LOAD) {			\
+			DUMP_WRITE((void *) gate_phdrs[i].p_vaddr,	\
+				   PAGE_ALIGN(gate_phdrs[i].p_memsz));	\
+			break;						\
+		}							\
+	}								\
+} while (0)
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_IA64_ELF_H */
diff --git a/include/asm-ia64/errno.h b/include/asm-ia64/errno.h
new file mode 100644
index 000000000000..4c82b503d92f
--- /dev/null
+++ b/include/asm-ia64/errno.h
@@ -0,0 +1 @@
+#include <asm-generic/errno.h>
diff --git a/include/asm-ia64/fcntl.h b/include/asm-ia64/fcntl.h
new file mode 100644
index 000000000000..d193981bb1d8
--- /dev/null
+++ b/include/asm-ia64/fcntl.h
@@ -0,0 +1,84 @@
+#ifndef _ASM_IA64_FCNTL_H
+#define _ASM_IA64_FCNTL_H
+/*
+ * Based on <asm-i386/fcntl.h>.
+ *
+ * Modified 1998-2000
+ *	David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co.
+ */ + +/* + * open/fcntl - O_SYNC is only implemented on block devices and on + * files located on an ext2 file system + */ +#define O_ACCMODE 0003 +#define O_RDONLY 00 +#define O_WRONLY 01 +#define O_RDWR 02 +#define O_CREAT 0100 /* not fcntl */ +#define O_EXCL 0200 /* not fcntl */ +#define O_NOCTTY 0400 /* not fcntl */ +#define O_TRUNC 01000 /* not fcntl */ +#define O_APPEND 02000 +#define O_NONBLOCK 04000 +#define O_NDELAY O_NONBLOCK +#define O_SYNC 010000 +#define FASYNC 020000 /* fcntl, for BSD compatibility */ +#define O_DIRECT 040000 /* direct disk access hint - currently ignored */ +#define O_LARGEFILE 0100000 +#define O_DIRECTORY 0200000 /* must be a directory */ +#define O_NOFOLLOW 0400000 /* don't follow links */ +#define O_NOATIME 01000000 + +#define F_DUPFD 0 /* dup */ +#define F_GETFD 1 /* get close_on_exec */ +#define F_SETFD 2 /* set/clear close_on_exec */ +#define F_GETFL 3 /* get file->f_flags */ +#define F_SETFL 4 /* set file->f_flags */ +#define F_GETLK 5 +#define F_SETLK 6 +#define F_SETLKW 7 + +#define F_SETOWN 8 /* for sockets. */ +#define F_GETOWN 9 /* for sockets. */ +#define F_SETSIG 10 /* for sockets. */ +#define F_GETSIG 11 /* for sockets. */ + +/* for F_[GET|SET]FL */ +#define FD_CLOEXEC 1 /* actually anything with low bit set goes */ + +/* for posix fcntl() and lockf() */ +#define F_RDLCK 0 +#define F_WRLCK 1 +#define F_UNLCK 2 + +/* for old implementation of bsd flock() */ +#define F_EXLCK 4 /* or 3 */ +#define F_SHLCK 8 /* or 4 */ + +/* for leases */ +#define F_INPROGRESS 16 + +/* operations for bsd flock(), also used by the kernel implementation */ +#define LOCK_SH 1 /* shared lock */ +#define LOCK_EX 2 /* exclusive lock */ +#define LOCK_NB 4 /* or'd with one of the above to prevent + blocking */ +#define LOCK_UN 8 /* remove lock */ + +#define LOCK_MAND 32 /* This is a mandatory flock */ +#define LOCK_READ 64 /* ... Which allows concurrent read operations */ +#define LOCK_WRITE 128 /* ... Which allows concurrent write operations */ +#define LOCK_RW 192 /* ... Which allows concurrent read & write ops */ + +struct flock { + short l_type; + short l_whence; + off_t l_start; + off_t l_len; + pid_t l_pid; +}; + +#define F_LINUX_SPECIFIC_BASE 1024 + +#endif /* _ASM_IA64_FCNTL_H */ diff --git a/include/asm-ia64/fpswa.h b/include/asm-ia64/fpswa.h new file mode 100644 index 000000000000..62edfceadaa6 --- /dev/null +++ b/include/asm-ia64/fpswa.h @@ -0,0 +1,73 @@ +#ifndef _ASM_IA64_FPSWA_H +#define _ASM_IA64_FPSWA_H + +/* + * Floating-point Software Assist + * + * Copyright (C) 1999 Intel Corporation.
+ * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com> + * Copyright (C) 1999 Goutham Rao <goutham.rao@intel.com> + */ + +typedef struct { + /* 4 * 128 bits */ + unsigned long fp_lp[4*2]; +} fp_state_low_preserved_t; + +typedef struct { + /* 10 * 128 bits */ + unsigned long fp_lv[10 * 2]; +} fp_state_low_volatile_t; + +typedef struct { + /* 16 * 128 bits */ + unsigned long fp_hp[16 * 2]; +} fp_state_high_preserved_t; + +typedef struct { + /* 96 * 128 bits */ + unsigned long fp_hv[96 * 2]; +} fp_state_high_volatile_t; + +/** + * floating point state to be passed to the FP emulation library by + * the trap/fault handler + */ +typedef struct { + unsigned long bitmask_low64; + unsigned long bitmask_high64; + fp_state_low_preserved_t *fp_state_low_preserved; + fp_state_low_volatile_t *fp_state_low_volatile; + fp_state_high_preserved_t *fp_state_high_preserved; + fp_state_high_volatile_t *fp_state_high_volatile; +} fp_state_t; + +typedef struct { + unsigned long status; + unsigned long err0; + unsigned long err1; + unsigned long err2; +} fpswa_ret_t; + +/** + * function header for the Floating Point software assist + * library. This function is invoked by the Floating point software + * assist trap/fault handler. + */ +typedef fpswa_ret_t (*efi_fpswa_t) (unsigned long trap_type, void *bundle, unsigned long *ipsr, + unsigned long *fsr, unsigned long *isr, unsigned long *preds, + unsigned long *ifs, fp_state_t *fp_state); + +/** + * This is the FPSWA library interface as defined by EFI. We need to pass a + * pointer to the interface itself on a call to the assist library + */ +typedef struct { + unsigned int revision; + unsigned int reserved; + efi_fpswa_t fpswa; +} fpswa_interface_t; + +extern fpswa_interface_t *fpswa_interface; + +#endif /* _ASM_IA64_FPSWA_H */ diff --git a/include/asm-ia64/fpu.h b/include/asm-ia64/fpu.h new file mode 100644 index 000000000000..3859558ff0a4 --- /dev/null +++ b/include/asm-ia64/fpu.h @@ -0,0 +1,66 @@ +#ifndef _ASM_IA64_FPU_H +#define _ASM_IA64_FPU_H + +/* + * Copyright (C) 1998, 1999, 2002, 2003 Hewlett-Packard Co + * David Mosberger-Tang <davidm@hpl.hp.com> + */ + +#include <asm/types.h> + +/* floating point status register: */ +#define FPSR_TRAP_VD (1 << 0) /* invalid op trap disabled */ +#define FPSR_TRAP_DD (1 << 1) /* denormal trap disabled */ +#define FPSR_TRAP_ZD (1 << 2) /* zero-divide trap disabled */ +#define FPSR_TRAP_OD (1 << 3) /* overflow trap disabled */ +#define FPSR_TRAP_UD (1 << 4) /* underflow trap disabled */ +#define FPSR_TRAP_ID (1 << 5) /* inexact trap disabled */ +#define FPSR_S0(x) ((x) << 6) +#define FPSR_S1(x) ((x) << 19) +#define FPSR_S2(x) (__IA64_UL(x) << 32) +#define FPSR_S3(x) (__IA64_UL(x) << 45) + +/* floating-point status field controls: */ +#define FPSF_FTZ (1 << 0) /* flush-to-zero */ +#define FPSF_WRE (1 << 1) /* widest-range exponent */ +#define FPSF_PC(x) (((x) & 0x3) << 2) /* precision control */ +#define FPSF_RC(x) (((x) & 0x3) << 4) /* rounding control */ +#define FPSF_TD (1 << 6) /* trap disabled */ + +/* floating-point status field flags: */ +#define FPSF_V (1 << 7) /* invalid operation flag */ +#define FPSF_D (1 << 8) /* denormal/unnormal operand flag */ +#define FPSF_Z (1 << 9) /* zero divide (IEEE) flag */ +#define FPSF_O (1 << 10) /* overflow (IEEE) flag */ +#define FPSF_U (1 << 11) /* underflow (IEEE) flag */ +#define FPSF_I (1 << 12) /* inexact (IEEE) flag */ + +/* floating-point rounding control: */ +#define FPRC_NEAREST 0x0 +#define FPRC_NEGINF 0x1 +#define FPRC_POSINF 0x2 +#define FPRC_TRUNC 0x3 +
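+/*
+ * Illustrative aside (an editorial sketch, not part of the original
+ * header): a status-field value is composed by OR-ing the controls above.
+ * For example, flush-to-zero combined with round-toward-zero would be
+ *
+ *	FPSF_FTZ | FPSF_RC(FPRC_TRUNC)
+ *
+ * and could be installed into status field 1 with FPSR_S1().
+ */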
+#define FPSF_DEFAULT (FPSF_PC (0x3) | FPSF_RC (FPRC_NEAREST)) + +/* This default value is the same as HP-UX uses. Don't change it + without a very good reason. */ +#define FPSR_DEFAULT (FPSR_TRAP_VD | FPSR_TRAP_DD | FPSR_TRAP_ZD \ + | FPSR_TRAP_OD | FPSR_TRAP_UD | FPSR_TRAP_ID \ + | FPSR_S0 (FPSF_DEFAULT) \ + | FPSR_S1 (FPSF_DEFAULT | FPSF_TD | FPSF_WRE) \ + | FPSR_S2 (FPSF_DEFAULT | FPSF_TD) \ + | FPSR_S3 (FPSF_DEFAULT | FPSF_TD)) + +# ifndef __ASSEMBLY__ + +struct ia64_fpreg { + union { + unsigned long bits[2]; + long double __dummy; /* force 16-byte alignment */ + } u; +}; + +# endif /* __ASSEMBLY__ */ + +#endif /* _ASM_IA64_FPU_H */ diff --git a/include/asm-ia64/gcc_intrin.h b/include/asm-ia64/gcc_intrin.h new file mode 100644 index 000000000000..7c357dfbae50 --- /dev/null +++ b/include/asm-ia64/gcc_intrin.h @@ -0,0 +1,597 @@ +#ifndef _ASM_IA64_GCC_INTRIN_H +#define _ASM_IA64_GCC_INTRIN_H +/* + * + * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com> + * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com> + */ + +#include <linux/compiler.h> + +/* define this macro to get some asm stmts included in 'c' files */ +#define ASM_SUPPORTED + +/* Optimization barrier */ +/* The "volatile" is due to gcc bugs */ +#define ia64_barrier() asm volatile ("":::"memory") + +#define ia64_stop() asm volatile (";;"::) + +#define ia64_invala_gr(regnum) asm volatile ("invala.e r%0" :: "i"(regnum)) + +#define ia64_invala_fr(regnum) asm volatile ("invala.e f%0" :: "i"(regnum)) + +extern void ia64_bad_param_for_setreg (void); +extern void ia64_bad_param_for_getreg (void); + +register unsigned long ia64_r13 asm ("r13") __attribute_used__; + +#define ia64_setreg(regnum, val) \ +({ \ + switch (regnum) { \ + case _IA64_REG_PSR_L: \ + asm volatile ("mov psr.l=%0" :: "r"(val) : "memory"); \ + break; \ + case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC: \ + asm volatile ("mov ar%0=%1" :: \ + "i" (regnum - _IA64_REG_AR_KR0), \ + "r"(val): "memory"); \ + break; \ + case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1: \ + asm volatile ("mov cr%0=%1" :: \ + "i" (regnum - _IA64_REG_CR_DCR), \ + "r"(val): "memory" ); \ + break; \ + case _IA64_REG_SP: \ + asm volatile ("mov r12=%0" :: \ + "r"(val): "memory"); \ + break; \ + case _IA64_REG_GP: \ + asm volatile ("mov gp=%0" :: "r"(val) : "memory"); \ + break; \ + default: \ + ia64_bad_param_for_setreg(); \ + break; \ + } \ +}) + +#define ia64_getreg(regnum) \ +({ \ + __u64 ia64_intri_res; \ + \ + switch (regnum) { \ + case _IA64_REG_GP: \ + asm volatile ("mov %0=gp" : "=r"(ia64_intri_res)); \ + break; \ + case _IA64_REG_IP: \ + asm volatile ("mov %0=ip" : "=r"(ia64_intri_res)); \ + break; \ + case _IA64_REG_PSR: \ + asm volatile ("mov %0=psr" : "=r"(ia64_intri_res)); \ + break; \ + case _IA64_REG_TP: /* for current() */ \ + ia64_intri_res = ia64_r13; \ + break; \ + case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC: \ + asm volatile ("mov %0=ar%1" : "=r" (ia64_intri_res) \ + : "i"(regnum - _IA64_REG_AR_KR0)); \ + break; \ + case _IA64_REG_CR_DCR ... 
_IA64_REG_CR_LRR1: \ + asm volatile ("mov %0=cr%1" : "=r" (ia64_intri_res) \ + : "i" (regnum - _IA64_REG_CR_DCR)); \ + break; \ + case _IA64_REG_SP: \ + asm volatile ("mov %0=sp" : "=r" (ia64_intri_res)); \ + break; \ + default: \ + ia64_bad_param_for_getreg(); \ + break; \ + } \ + ia64_intri_res; \ +}) + +#define ia64_hint_pause 0 + +#define ia64_hint(mode) \ +({ \ + switch (mode) { \ + case ia64_hint_pause: \ + asm volatile ("hint @pause" ::: "memory"); \ + break; \ + } \ +}) + + +/* Integer values for mux1 instruction */ +#define ia64_mux1_brcst 0 +#define ia64_mux1_mix 8 +#define ia64_mux1_shuf 9 +#define ia64_mux1_alt 10 +#define ia64_mux1_rev 11 + +#define ia64_mux1(x, mode) \ +({ \ + __u64 ia64_intri_res; \ + \ + switch (mode) { \ + case ia64_mux1_brcst: \ + asm ("mux1 %0=%1,@brcst" : "=r" (ia64_intri_res) : "r" (x)); \ + break; \ + case ia64_mux1_mix: \ + asm ("mux1 %0=%1,@mix" : "=r" (ia64_intri_res) : "r" (x)); \ + break; \ + case ia64_mux1_shuf: \ + asm ("mux1 %0=%1,@shuf" : "=r" (ia64_intri_res) : "r" (x)); \ + break; \ + case ia64_mux1_alt: \ + asm ("mux1 %0=%1,@alt" : "=r" (ia64_intri_res) : "r" (x)); \ + break; \ + case ia64_mux1_rev: \ + asm ("mux1 %0=%1,@rev" : "=r" (ia64_intri_res) : "r" (x)); \ + break; \ + } \ + ia64_intri_res; \ +}) + +#define ia64_popcnt(x) \ +({ \ + __u64 ia64_intri_res; \ + asm ("popcnt %0=%1" : "=r" (ia64_intri_res) : "r" (x)); \ + \ + ia64_intri_res; \ +}) + +#define ia64_getf_exp(x) \ +({ \ + long ia64_intri_res; \ + \ + asm ("getf.exp %0=%1" : "=r"(ia64_intri_res) : "f"(x)); \ + \ + ia64_intri_res; \ +}) + +#define ia64_shrp(a, b, count) \ +({ \ + __u64 ia64_intri_res; \ + asm ("shrp %0=%1,%2,%3" : "=r"(ia64_intri_res) : "r"(a), "r"(b), "i"(count)); \ + ia64_intri_res; \ +}) + +#define ia64_ldfs(regnum, x) \ +({ \ + register double __f__ asm ("f"#regnum); \ + asm volatile ("ldfs %0=[%1]" :"=f"(__f__): "r"(x)); \ +}) + +#define ia64_ldfd(regnum, x) \ +({ \ + register double __f__ asm ("f"#regnum); \ + asm volatile ("ldfd %0=[%1]" :"=f"(__f__): "r"(x)); \ +}) + +#define ia64_ldfe(regnum, x) \ +({ \ + register double __f__ asm ("f"#regnum); \ + asm volatile ("ldfe %0=[%1]" :"=f"(__f__): "r"(x)); \ +}) + +#define ia64_ldf8(regnum, x) \ +({ \ + register double __f__ asm ("f"#regnum); \ + asm volatile ("ldf8 %0=[%1]" :"=f"(__f__): "r"(x)); \ +}) + +#define ia64_ldf_fill(regnum, x) \ +({ \ + register double __f__ asm ("f"#regnum); \ + asm volatile ("ldf.fill %0=[%1]" :"=f"(__f__): "r"(x)); \ +}) + +#define ia64_stfs(x, regnum) \ +({ \ + register double __f__ asm ("f"#regnum); \ + asm volatile ("stfs [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \ +}) + +#define ia64_stfd(x, regnum) \ +({ \ + register double __f__ asm ("f"#regnum); \ + asm volatile ("stfd [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \ +}) + +#define ia64_stfe(x, regnum) \ +({ \ + register double __f__ asm ("f"#regnum); \ + asm volatile ("stfe [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \ +}) + +#define ia64_stf8(x, regnum) \ +({ \ + register double __f__ asm ("f"#regnum); \ + asm volatile ("stf8 [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \ +}) + +#define ia64_stf_spill(x, regnum) \ +({ \ + register double __f__ asm ("f"#regnum); \ + asm volatile ("stf.spill [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \ +}) + +#define ia64_fetchadd4_acq(p, inc) \ +({ \ + \ + __u64 ia64_intri_res; \ + asm volatile ("fetchadd4.acq %0=[%1],%2" \ + : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \ + : "memory"); \ + \ + ia64_intri_res; \ +}) + +#define ia64_fetchadd4_rel(p, inc) \ +({ \ + __u64 ia64_intri_res; \ + 
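/* note (added for exposition): yields the value *p held before the add */ \ +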
asm volatile ("fetchadd4.rel %0=[%1],%2" \ + : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \ + : "memory"); \ + \ + ia64_intri_res; \ +}) + +#define ia64_fetchadd8_acq(p, inc) \ +({ \ + \ + __u64 ia64_intri_res; \ + asm volatile ("fetchadd8.acq %0=[%1],%2" \ + : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \ + : "memory"); \ + \ + ia64_intri_res; \ +}) + +#define ia64_fetchadd8_rel(p, inc) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("fetchadd8.rel %0=[%1],%2" \ + : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \ + : "memory"); \ + \ + ia64_intri_res; \ +}) + +#define ia64_xchg1(ptr,x) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("xchg1 %0=[%1],%2" \ + : "=r" (ia64_intri_res) : "r" (ptr), "r" (x) : "memory"); \ + ia64_intri_res; \ +}) + +#define ia64_xchg2(ptr,x) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("xchg2 %0=[%1],%2" : "=r" (ia64_intri_res) \ + : "r" (ptr), "r" (x) : "memory"); \ + ia64_intri_res; \ +}) + +#define ia64_xchg4(ptr,x) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("xchg4 %0=[%1],%2" : "=r" (ia64_intri_res) \ + : "r" (ptr), "r" (x) : "memory"); \ + ia64_intri_res; \ +}) + +#define ia64_xchg8(ptr,x) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("xchg8 %0=[%1],%2" : "=r" (ia64_intri_res) \ + : "r" (ptr), "r" (x) : "memory"); \ + ia64_intri_res; \ +}) + +#define ia64_cmpxchg1_acq(ptr, new, old) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \ + asm volatile ("cmpxchg1.acq %0=[%1],%2,ar.ccv": \ + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ + ia64_intri_res; \ +}) + +#define ia64_cmpxchg1_rel(ptr, new, old) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \ + asm volatile ("cmpxchg1.rel %0=[%1],%2,ar.ccv": \ + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ + ia64_intri_res; \ +}) + +#define ia64_cmpxchg2_acq(ptr, new, old) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \ + asm volatile ("cmpxchg2.acq %0=[%1],%2,ar.ccv": \ + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ + ia64_intri_res; \ +}) + +#define ia64_cmpxchg2_rel(ptr, new, old) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \ + \ + asm volatile ("cmpxchg2.rel %0=[%1],%2,ar.ccv": \ + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ + ia64_intri_res; \ +}) + +#define ia64_cmpxchg4_acq(ptr, new, old) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \ + asm volatile ("cmpxchg4.acq %0=[%1],%2,ar.ccv": \ + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ + ia64_intri_res; \ +}) + +#define ia64_cmpxchg4_rel(ptr, new, old) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \ + asm volatile ("cmpxchg4.rel %0=[%1],%2,ar.ccv": \ + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ + ia64_intri_res; \ +}) + +#define ia64_cmpxchg8_acq(ptr, new, old) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \ + asm volatile ("cmpxchg8.acq %0=[%1],%2,ar.ccv": \ + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ + ia64_intri_res; \ +}) + +#define ia64_cmpxchg8_rel(ptr, new, old) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \ + \ + asm volatile ("cmpxchg8.rel %0=[%1],%2,ar.ccv": \ + "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \ + ia64_intri_res; \ +}) + +#define ia64_mf() asm volatile ("mf" ::: "memory") +#define ia64_mfa() asm volatile ("mf.a" ::: "memory") + +#define ia64_invala() 
asm volatile ("invala" ::: "memory") + +#define ia64_thash(addr) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \ + ia64_intri_res; \ +}) + +#define ia64_srlz_i() asm volatile (";; srlz.i ;;" ::: "memory") +#define ia64_srlz_d() asm volatile (";; srlz.d" ::: "memory"); + +#ifdef HAVE_SERIALIZE_DIRECTIVE +# define ia64_dv_serialize_data() asm volatile (".serialize.data"); +# define ia64_dv_serialize_instruction() asm volatile (".serialize.instruction"); +#else +# define ia64_dv_serialize_data() +# define ia64_dv_serialize_instruction() +#endif + +#define ia64_nop(x) asm volatile ("nop %0"::"i"(x)); + +#define ia64_itci(addr) asm volatile ("itc.i %0;;" :: "r"(addr) : "memory") + +#define ia64_itcd(addr) asm volatile ("itc.d %0;;" :: "r"(addr) : "memory") + + +#define ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1" \ + :: "r"(trnum), "r"(addr) : "memory") + +#define ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1" \ + :: "r"(trnum), "r"(addr) : "memory") + +#define ia64_tpa(addr) \ +({ \ + __u64 ia64_pa; \ + asm volatile ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : "memory"); \ + ia64_pa; \ +}) + +#define __ia64_set_dbr(index, val) \ + asm volatile ("mov dbr[%0]=%1" :: "r"(index), "r"(val) : "memory") + +#define ia64_set_ibr(index, val) \ + asm volatile ("mov ibr[%0]=%1" :: "r"(index), "r"(val) : "memory") + +#define ia64_set_pkr(index, val) \ + asm volatile ("mov pkr[%0]=%1" :: "r"(index), "r"(val) : "memory") + +#define ia64_set_pmc(index, val) \ + asm volatile ("mov pmc[%0]=%1" :: "r"(index), "r"(val) : "memory") + +#define ia64_set_pmd(index, val) \ + asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory") + +#define ia64_set_rr(index, val) \ + asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory"); + +#define ia64_get_cpuid(index) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index)); \ + ia64_intri_res; \ +}) + +#define __ia64_get_dbr(index) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("mov %0=dbr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \ + ia64_intri_res; \ +}) + +#define ia64_get_ibr(index) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \ + ia64_intri_res; \ +}) + +#define ia64_get_pkr(index) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \ + ia64_intri_res; \ +}) + +#define ia64_get_pmc(index) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index)); \ + ia64_intri_res; \ +}) + + +#define ia64_get_pmd(index) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index)); \ + ia64_intri_res; \ +}) + +#define ia64_get_rr(index) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index)); \ + ia64_intri_res; \ +}) + +#define ia64_fc(addr) asm volatile ("fc %0" :: "r"(addr) : "memory") + + +#define ia64_sync_i() asm volatile (";; sync.i" ::: "memory") + +#define ia64_ssm(mask) asm volatile ("ssm %0":: "i"((mask)) : "memory") +#define ia64_rsm(mask) asm volatile ("rsm %0":: "i"((mask)) : "memory") +#define ia64_sum(mask) asm volatile ("sum %0":: "i"((mask)) : "memory") +#define ia64_rum(mask) asm volatile ("rum %0":: "i"((mask)) : "memory") + +#define ia64_ptce(addr) asm volatile ("ptc.e %0" :: "r"(addr)) + +#define ia64_ptcga(addr, size) \ +do { \ + asm volatile ("ptc.ga %0,%1" :: "r"(addr), 
"r"(size) : "memory"); \ + ia64_dv_serialize_data(); \ +} while (0) + +#define ia64_ptcl(addr, size) \ +do { \ + asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(size) : "memory"); \ + ia64_dv_serialize_data(); \ +} while (0) + +#define ia64_ptri(addr, size) \ + asm volatile ("ptr.i %0,%1" :: "r"(addr), "r"(size) : "memory") + +#define ia64_ptrd(addr, size) \ + asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory") + +/* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */ + +#define ia64_lfhint_none 0 +#define ia64_lfhint_nt1 1 +#define ia64_lfhint_nt2 2 +#define ia64_lfhint_nta 3 + +#define ia64_lfetch(lfhint, y) \ +({ \ + switch (lfhint) { \ + case ia64_lfhint_none: \ + asm volatile ("lfetch [%0]" : : "r"(y)); \ + break; \ + case ia64_lfhint_nt1: \ + asm volatile ("lfetch.nt1 [%0]" : : "r"(y)); \ + break; \ + case ia64_lfhint_nt2: \ + asm volatile ("lfetch.nt2 [%0]" : : "r"(y)); \ + break; \ + case ia64_lfhint_nta: \ + asm volatile ("lfetch.nta [%0]" : : "r"(y)); \ + break; \ + } \ +}) + +#define ia64_lfetch_excl(lfhint, y) \ +({ \ + switch (lfhint) { \ + case ia64_lfhint_none: \ + asm volatile ("lfetch.excl [%0]" :: "r"(y)); \ + break; \ + case ia64_lfhint_nt1: \ + asm volatile ("lfetch.excl.nt1 [%0]" :: "r"(y)); \ + break; \ + case ia64_lfhint_nt2: \ + asm volatile ("lfetch.excl.nt2 [%0]" :: "r"(y)); \ + break; \ + case ia64_lfhint_nta: \ + asm volatile ("lfetch.excl.nta [%0]" :: "r"(y)); \ + break; \ + } \ +}) + +#define ia64_lfetch_fault(lfhint, y) \ +({ \ + switch (lfhint) { \ + case ia64_lfhint_none: \ + asm volatile ("lfetch.fault [%0]" : : "r"(y)); \ + break; \ + case ia64_lfhint_nt1: \ + asm volatile ("lfetch.fault.nt1 [%0]" : : "r"(y)); \ + break; \ + case ia64_lfhint_nt2: \ + asm volatile ("lfetch.fault.nt2 [%0]" : : "r"(y)); \ + break; \ + case ia64_lfhint_nta: \ + asm volatile ("lfetch.fault.nta [%0]" : : "r"(y)); \ + break; \ + } \ +}) + +#define ia64_lfetch_fault_excl(lfhint, y) \ +({ \ + switch (lfhint) { \ + case ia64_lfhint_none: \ + asm volatile ("lfetch.fault.excl [%0]" :: "r"(y)); \ + break; \ + case ia64_lfhint_nt1: \ + asm volatile ("lfetch.fault.excl.nt1 [%0]" :: "r"(y)); \ + break; \ + case ia64_lfhint_nt2: \ + asm volatile ("lfetch.fault.excl.nt2 [%0]" :: "r"(y)); \ + break; \ + case ia64_lfhint_nta: \ + asm volatile ("lfetch.fault.excl.nta [%0]" :: "r"(y)); \ + break; \ + } \ +}) + +#define ia64_intrin_local_irq_restore(x) \ +do { \ + asm volatile (";; cmp.ne p6,p7=%0,r0;;" \ + "(p6) ssm psr.i;" \ + "(p7) rsm psr.i;;" \ + "(p6) srlz.d" \ + :: "r"((x)) : "p6", "p7", "memory"); \ +} while (0) + +#endif /* _ASM_IA64_GCC_INTRIN_H */ diff --git a/include/asm-ia64/hardirq.h b/include/asm-ia64/hardirq.h new file mode 100644 index 000000000000..33ef8f096d95 --- /dev/null +++ b/include/asm-ia64/hardirq.h @@ -0,0 +1,38 @@ +#ifndef _ASM_IA64_HARDIRQ_H +#define _ASM_IA64_HARDIRQ_H + +/* + * Modified 1998-2002, 2004 Hewlett-Packard Co + * David Mosberger-Tang <davidm@hpl.hp.com> + */ + +#include <linux/config.h> + +#include <linux/threads.h> +#include <linux/irq.h> + +#include <asm/processor.h> + +/* + * No irq_cpustat_t for IA-64. The data is held in the per-CPU data structure. + */ + +#define __ARCH_IRQ_STAT 1 + +#define local_softirq_pending() (local_cpu_data->softirq_pending) + +#define HARDIRQ_BITS 14 + +/* + * The hardirq mask has to be large enough to have space for potentially all IRQ sources + * in the system nesting on a single CPU: + */ +#if (1 << HARDIRQ_BITS) < NR_IRQS +# error HARDIRQ_BITS is too low! 
+#endif + +extern void __iomem *ipi_base_addr; + +void ack_bad_irq(unsigned int irq); + +#endif /* _ASM_IA64_HARDIRQ_H */ diff --git a/include/asm-ia64/hdreg.h b/include/asm-ia64/hdreg.h new file mode 100644 index 000000000000..83b5161d2678 --- /dev/null +++ b/include/asm-ia64/hdreg.h @@ -0,0 +1,14 @@ +/* + * linux/include/asm-ia64/hdreg.h + * + * Copyright (C) 1994-1996 Linus Torvalds & authors + */ + +#warning this file is obsolete, please do not use it + +#ifndef __ASM_IA64_HDREG_H +#define __ASM_IA64_HDREG_H + +typedef unsigned short ide_ioreg_t; + +#endif /* __ASM_IA64_HDREG_H */ diff --git a/include/asm-ia64/hw_irq.h b/include/asm-ia64/hw_irq.h new file mode 100644 index 000000000000..041ab8c51a64 --- /dev/null +++ b/include/asm-ia64/hw_irq.h @@ -0,0 +1,144 @@ +#ifndef _ASM_IA64_HW_IRQ_H +#define _ASM_IA64_HW_IRQ_H + +/* + * Copyright (C) 2001-2003 Hewlett-Packard Co + * David Mosberger-Tang <davidm@hpl.hp.com> + */ + +#include <linux/interrupt.h> +#include <linux/sched.h> +#include <linux/types.h> +#include <linux/profile.h> + +#include <asm/machvec.h> +#include <asm/ptrace.h> +#include <asm/smp.h> + +typedef u8 ia64_vector; + +/* + * 0 special + * + * 1,3-14 are reserved from firmware + * + * 16-255 (vectored external interrupts) are available + * + * 15 spurious interrupt (see IVR) + * + * 16 lowest priority, 255 highest priority + * + * 15 classes of 16 interrupts each. + */ +#define IA64_MIN_VECTORED_IRQ 16 +#define IA64_MAX_VECTORED_IRQ 255 +#define IA64_NUM_VECTORS 256 + +#define AUTO_ASSIGN -1 + +#define IA64_SPURIOUS_INT_VECTOR 0x0f + +/* + * Vectors 0x10-0x1f are used for low priority interrupts, e.g. CMCI. + */ +#define IA64_CPEP_VECTOR 0x1c /* corrected platform error polling vector */ +#define IA64_CMCP_VECTOR 0x1d /* corrected machine-check polling vector */ +#define IA64_CPE_VECTOR 0x1e /* corrected platform error interrupt vector */ +#define IA64_CMC_VECTOR 0x1f /* corrected machine-check interrupt vector */ +/* + * Vectors 0x20-0x2f are reserved for legacy ISA IRQs. + */ +#define IA64_FIRST_DEVICE_VECTOR 0x30 +#define IA64_LAST_DEVICE_VECTOR 0xe7 +#define IA64_NUM_DEVICE_VECTORS (IA64_LAST_DEVICE_VECTOR - IA64_FIRST_DEVICE_VECTOR + 1) + +#define IA64_MCA_RENDEZ_VECTOR 0xe8 /* MCA rendez interrupt */ +#define IA64_PERFMON_VECTOR 0xee /* performance monitor interrupt vector */ +#define IA64_TIMER_VECTOR 0xef /* use highest-prio group 15 interrupt for timer */ +#define IA64_MCA_WAKEUP_VECTOR 0xf0 /* MCA wakeup (must be >MCA_RENDEZ_VECTOR) */ +#define IA64_IPI_RESCHEDULE 0xfd /* SMP reschedule */ +#define IA64_IPI_VECTOR 0xfe /* inter-processor interrupt vector */ + +/* Used for encoding redirected irqs */ + +#define IA64_IRQ_REDIRECTED (1 << 31) + +/* IA64 inter-cpu interrupt related definitions */ + +#define IA64_IPI_DEFAULT_BASE_ADDR 0xfee00000 + +/* Delivery modes for inter-cpu interrupts */ +enum { + IA64_IPI_DM_INT = 0x0, /* pend an external interrupt */ + IA64_IPI_DM_PMI = 0x2, /* pend a PMI */ + IA64_IPI_DM_NMI = 0x4, /* pend an NMI (vector 2) */ + IA64_IPI_DM_INIT = 0x5, /* pend an INIT interrupt */ + IA64_IPI_DM_EXTINT = 0x7, /* pend an 8259-compatible interrupt.
*/ +}; + +extern __u8 isa_irq_to_vector_map[16]; +#define isa_irq_to_vector(x) isa_irq_to_vector_map[(x)] + +extern struct hw_interrupt_type irq_type_ia64_lsapic; /* CPU-internal interrupt controller */ + +extern int assign_irq_vector (int irq); /* allocate a free vector */ +extern void free_irq_vector (int vector); +extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect); +extern void register_percpu_irq (ia64_vector vec, struct irqaction *action); + +static inline void +hw_resend_irq (struct hw_interrupt_type *h, unsigned int vector) +{ + platform_send_ipi(smp_processor_id(), vector, IA64_IPI_DM_INT, 0); +} + +/* + * Default implementations for the irq-descriptor API: + */ + +extern irq_desc_t irq_desc[NR_IRQS]; + +#ifndef CONFIG_IA64_GENERIC +static inline unsigned int +__ia64_local_vector_to_irq (ia64_vector vec) +{ + return (unsigned int) vec; +} +#endif + +/* + * Next follows the irq descriptor interface. On IA-64, each CPU supports 256 interrupt + * vectors. On smaller systems, there is a one-to-one correspondence between interrupt + * vectors and the Linux irq numbers. However, larger systems may have multiple interrupt + * domains meaning that the translation from vector number to irq number depends on the + * interrupt domain that a CPU belongs to. This API abstracts such platform-dependent + * differences and provides a uniform means to translate between vector and irq numbers + * and to obtain the irq descriptor for a given irq number. + */ + +/* Return a pointer to the irq descriptor for IRQ. */ +static inline irq_desc_t * +irq_descp (int irq) +{ + return irq_desc + irq; +} + +/* Extract the IA-64 vector that corresponds to IRQ. */ +static inline ia64_vector +irq_to_vector (int irq) +{ + return (ia64_vector) irq; +} + +/* + * Convert the local IA-64 vector to the corresponding irq number. This translation is + * done in the context of the interrupt domain that the currently executing CPU belongs + * to. + */ +static inline unsigned int +local_vector_to_irq (ia64_vector vec) +{ + return platform_local_vector_to_irq(vec); +} + +#endif /* _ASM_IA64_HW_IRQ_H */ diff --git a/include/asm-ia64/ia32.h b/include/asm-ia64/ia32.h new file mode 100644 index 000000000000..8e746b2413a6 --- /dev/null +++ b/include/asm-ia64/ia32.h @@ -0,0 +1,38 @@ +#ifndef _ASM_IA64_IA32_H +#define _ASM_IA64_IA32_H + +#include <linux/config.h> + +#include <asm/ptrace.h> +#include <asm/signal.h> + +#define IA32_NR_syscalls 285 /* length of syscall table */ +#define IA32_PAGE_SHIFT 12 /* 4KB pages */ + +#ifndef __ASSEMBLY__ + +# ifdef CONFIG_IA32_SUPPORT + +extern void ia32_cpu_init (void); +extern void ia32_mem_init (void); +extern void ia32_gdt_init (void); +extern int ia32_exception (struct pt_regs *regs, unsigned long isr); +extern int ia32_intercept (struct pt_regs *regs, unsigned long isr); +extern int ia32_clone_tls (struct task_struct *child, struct pt_regs *childregs); + +# endif /* !CONFIG_IA32_SUPPORT */ + +/* Declare this unconditionally, so we don't get warnings for unreachable code. 
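For example, a signal-delivery path may reference ia32_setup_frame1() in a branch that is unreachable when CONFIG_IA32_SUPPORT is off; the unconditional prototype lets that code compile and be type-checked anyway.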
*/ +extern int ia32_setup_frame1 (int sig, struct k_sigaction *ka, siginfo_t *info, + sigset_t *set, struct pt_regs *regs); +#if PAGE_SHIFT > IA32_PAGE_SHIFT +extern int ia32_copy_partial_page_list (struct task_struct *, unsigned long); +extern void ia32_drop_partial_page_list (struct task_struct *); +#else +# define ia32_copy_partial_page_list(a1, a2) 0 +# define ia32_drop_partial_page_list(a1) do { ; } while (0) +#endif + +#endif /* !__ASSEMBLY__ */ + +#endif /* _ASM_IA64_IA32_H */ diff --git a/include/asm-ia64/ia64regs.h b/include/asm-ia64/ia64regs.h new file mode 100644 index 000000000000..1757f1c11ad4 --- /dev/null +++ b/include/asm-ia64/ia64regs.h @@ -0,0 +1,100 @@ +/* + * Copyright (C) 2002,2003 Intel Corp. + * Jun Nakajima <jun.nakajima@intel.com> + * Suresh Siddha <suresh.b.siddha@intel.com> + */ + +#ifndef _ASM_IA64_IA64REGS_H +#define _ASM_IA64_IA64REGS_H + +/* + * Register Names for getreg() and setreg(). + * + * The "magic" numbers happen to match the values used by the Intel compiler's + * getreg()/setreg() intrinsics. + */ + +/* Special Registers */ + +#define _IA64_REG_IP 1016 /* getreg only */ +#define _IA64_REG_PSR 1019 +#define _IA64_REG_PSR_L 1019 + +/* General Integer Registers */ + +#define _IA64_REG_GP 1025 /* R1 */ +#define _IA64_REG_R8 1032 /* R8 */ +#define _IA64_REG_R9 1033 /* R9 */ +#define _IA64_REG_SP 1036 /* R12 */ +#define _IA64_REG_TP 1037 /* R13 */ + +/* Application Registers */ + +#define _IA64_REG_AR_KR0 3072 +#define _IA64_REG_AR_KR1 3073 +#define _IA64_REG_AR_KR2 3074 +#define _IA64_REG_AR_KR3 3075 +#define _IA64_REG_AR_KR4 3076 +#define _IA64_REG_AR_KR5 3077 +#define _IA64_REG_AR_KR6 3078 +#define _IA64_REG_AR_KR7 3079 +#define _IA64_REG_AR_RSC 3088 +#define _IA64_REG_AR_BSP 3089 +#define _IA64_REG_AR_BSPSTORE 3090 +#define _IA64_REG_AR_RNAT 3091 +#define _IA64_REG_AR_FCR 3093 +#define _IA64_REG_AR_EFLAG 3096 +#define _IA64_REG_AR_CSD 3097 +#define _IA64_REG_AR_SSD 3098 +#define _IA64_REG_AR_CFLAG 3099 +#define _IA64_REG_AR_FSR 3100 +#define _IA64_REG_AR_FIR 3101 +#define _IA64_REG_AR_FDR 3102 +#define _IA64_REG_AR_CCV 3104 +#define _IA64_REG_AR_UNAT 3108 +#define _IA64_REG_AR_FPSR 3112 +#define _IA64_REG_AR_ITC 3116 +#define _IA64_REG_AR_PFS 3136 +#define _IA64_REG_AR_LC 3137 +#define _IA64_REG_AR_EC 3138 + +/* Control Registers */ + +#define _IA64_REG_CR_DCR 4096 +#define _IA64_REG_CR_ITM 4097 +#define _IA64_REG_CR_IVA 4098 +#define _IA64_REG_CR_PTA 4104 +#define _IA64_REG_CR_IPSR 4112 +#define _IA64_REG_CR_ISR 4113 +#define _IA64_REG_CR_IIP 4115 +#define _IA64_REG_CR_IFA 4116 +#define _IA64_REG_CR_ITIR 4117 +#define _IA64_REG_CR_IIPA 4118 +#define _IA64_REG_CR_IFS 4119 +#define _IA64_REG_CR_IIM 4120 +#define _IA64_REG_CR_IHA 4121 +#define _IA64_REG_CR_LID 4160 +#define _IA64_REG_CR_IVR 4161 /* getreg only */ +#define _IA64_REG_CR_TPR 4162 +#define _IA64_REG_CR_EOI 4163 +#define _IA64_REG_CR_IRR0 4164 /* getreg only */ +#define _IA64_REG_CR_IRR1 4165 /* getreg only */ +#define _IA64_REG_CR_IRR2 4166 /* getreg only */ +#define _IA64_REG_CR_IRR3 4167 /* getreg only */ +#define _IA64_REG_CR_ITV 4168 +#define _IA64_REG_CR_PMV 4169 +#define _IA64_REG_CR_CMCV 4170 +#define _IA64_REG_CR_LRR0 4176 +#define _IA64_REG_CR_LRR1 4177 + +/* Indirect Registers for getindreg() and setindreg() */ + +#define _IA64_REG_INDR_CPUID 9000 /* getindreg only */ +#define _IA64_REG_INDR_DBR 9001 +#define _IA64_REG_INDR_IBR 9002 +#define _IA64_REG_INDR_PKR 9003 +#define _IA64_REG_INDR_PMC 9004 +#define _IA64_REG_INDR_PMD 9005 +#define _IA64_REG_INDR_RR 9006 + +#endif /* 
_ASM_IA64_IA64REGS_H */ diff --git a/include/asm-ia64/ide.h b/include/asm-ia64/ide.h new file mode 100644 index 000000000000..e62b95301d51 --- /dev/null +++ b/include/asm-ia64/ide.h @@ -0,0 +1,71 @@ +/* + * linux/include/asm-ia64/ide.h + * + * Copyright (C) 1994-1996 Linus Torvalds & authors + */ + +/* + * This file contains the ia64 architecture specific IDE code. + */ + +#ifndef __ASM_IA64_IDE_H +#define __ASM_IA64_IDE_H + +#ifdef __KERNEL__ + +#include <linux/config.h> + +#include <linux/irq.h> + +#ifndef MAX_HWIFS +# ifdef CONFIG_PCI +#define MAX_HWIFS 10 +# else +#define MAX_HWIFS 6 +# endif +#endif + +#define IDE_ARCH_OBSOLETE_DEFAULTS + +static inline int ide_default_irq(unsigned long base) +{ + switch (base) { + case 0x1f0: return isa_irq_to_vector(14); + case 0x170: return isa_irq_to_vector(15); + case 0x1e8: return isa_irq_to_vector(11); + case 0x168: return isa_irq_to_vector(10); + case 0x1e0: return isa_irq_to_vector(8); + case 0x160: return isa_irq_to_vector(12); + default: + return 0; + } +} + +static inline unsigned long ide_default_io_base(int index) +{ + switch (index) { + case 0: return 0x1f0; + case 1: return 0x170; + case 2: return 0x1e8; + case 3: return 0x168; + case 4: return 0x1e0; + case 5: return 0x160; + default: + return 0; + } +} + +#define IDE_ARCH_OBSOLETE_INIT +#define ide_default_io_ctl(base) ((base) + 0x206) /* obsolete */ + +#ifdef CONFIG_PCI +#define ide_init_default_irq(base) (0) +#else +#define ide_init_default_irq(base) ide_default_irq(base) +#endif + +#include <asm-generic/ide_iops.h> + +#endif /* __KERNEL__ */ + +#endif /* __ASM_IA64_IDE_H */ diff --git a/include/asm-ia64/intel_intrin.h b/include/asm-ia64/intel_intrin.h new file mode 100644 index 000000000000..a7122d850177 --- /dev/null +++ b/include/asm-ia64/intel_intrin.h @@ -0,0 +1,257 @@ +#ifndef _ASM_IA64_INTEL_INTRIN_H +#define _ASM_IA64_INTEL_INTRIN_H +/* + * Intel Compiler Intrinsics + * + * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com> + * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com> + * + */ +#include <asm/types.h> + +void __lfetch(int lfhint, void *y); +void __lfetch_excl(int lfhint, void *y); +void __lfetch_fault(int lfhint, void *y); +void __lfetch_fault_excl(int lfhint, void *y); + +/* In the following, whichFloatReg should be an integer from 0-127 */ +void __ldfs(const int whichFloatReg, void *src); +void __ldfd(const int whichFloatReg, void *src); +void __ldfe(const int whichFloatReg, void *src); +void __ldf8(const int whichFloatReg, void *src); +void __ldf_fill(const int whichFloatReg, void *src); +void __stfs(void *dst, const int whichFloatReg); +void __stfd(void *dst, const int whichFloatReg); +void __stfe(void *dst, const int whichFloatReg); +void __stf8(void *dst, const int whichFloatReg); +void __stf_spill(void *dst, const int whichFloatReg); + +void __st1_rel(void *dst, const __s8 value); +void __st2_rel(void *dst, const __s16 value); +void __st4_rel(void *dst, const __s32 value); +void __st8_rel(void *dst, const __s64 value); +__u8 __ld1_acq(void *src); +__u16 __ld2_acq(void *src); +__u32 __ld4_acq(void *src); +__u64 __ld8_acq(void *src); + +__u64 __fetchadd4_acq(__u32 *addend, const int increment); +__u64 __fetchadd4_rel(__u32 *addend, const int increment); +__u64 __fetchadd8_acq(__u64 *addend, const int increment); +__u64 __fetchadd8_rel(__u64 *addend, const int increment); + +__u64 __getf_exp(double d); + +/* OS Related Itanium(R) Intrinsics */ + +/* The names to use for whichReg and whichIndReg below come from + the include file 
asm/ia64regs.h */ + +__u64 __getIndReg(const int whichIndReg, __s64 index); +__u64 __getReg(const int whichReg); + +void __setIndReg(const int whichIndReg, __s64 index, __u64 value); +void __setReg(const int whichReg, __u64 value); + +void __mf(void); +void __mfa(void); +void __synci(void); +void __itcd(__s64 pa); +void __itci(__s64 pa); +void __itrd(__s64 whichTransReg, __s64 pa); +void __itri(__s64 whichTransReg, __s64 pa); +void __ptce(__s64 va); +void __ptcl(__s64 va, __s64 pagesz); +void __ptcg(__s64 va, __s64 pagesz); +void __ptcga(__s64 va, __s64 pagesz); +void __ptri(__s64 va, __s64 pagesz); +void __ptrd(__s64 va, __s64 pagesz); +void __invala (void); +void __invala_gr(const int whichGeneralReg /* 0-127 */ ); +void __invala_fr(const int whichFloatReg /* 0-127 */ ); +void __nop(const int); +void __fc(__u64 *addr); +void __sum(int mask); +void __rum(int mask); +void __ssm(int mask); +void __rsm(int mask); +__u64 __thash(__s64); +__u64 __ttag(__s64); +__s64 __tpa(__s64); + +/* Intrinsics for implementing get/put_user macros */ +void __st_user(const char *tableName, __u64 addr, char size, char relocType, __u64 val); +void __ld_user(const char *tableName, __u64 addr, char size, char relocType); + +/* This intrinsic does not generate code, it creates a barrier across which + * the compiler will not schedule data access instructions. + */ +void __memory_barrier(void); + +void __isrlz(void); +void __dsrlz(void); + +__u64 _m64_mux1(__u64 a, const int n); +__u64 __thash(__u64); + +/* Lock and Atomic Operation Related Intrinsics */ +__u64 _InterlockedExchange8(volatile __u8 *trgt, __u8 value); +__u64 _InterlockedExchange16(volatile __u16 *trgt, __u16 value); +__s64 _InterlockedExchange(volatile __u32 *trgt, __u32 value); +__s64 _InterlockedExchange64(volatile __u64 *trgt, __u64 value); + +__u64 _InterlockedCompareExchange8_rel(volatile __u8 *dest, __u64 xchg, __u64 comp); +__u64 _InterlockedCompareExchange8_acq(volatile __u8 *dest, __u64 xchg, __u64 comp); +__u64 _InterlockedCompareExchange16_rel(volatile __u16 *dest, __u64 xchg, __u64 comp); +__u64 _InterlockedCompareExchange16_acq(volatile __u16 *dest, __u64 xchg, __u64 comp); +__u64 _InterlockedCompareExchange_rel(volatile __u32 *dest, __u64 xchg, __u64 comp); +__u64 _InterlockedCompareExchange_acq(volatile __u32 *dest, __u64 xchg, __u64 comp); +__u64 _InterlockedCompareExchange64_rel(volatile __u64 *dest, __u64 xchg, __u64 comp); +__u64 _InterlockedCompareExchange64_acq(volatile __u64 *dest, __u64 xchg, __u64 comp); + +__s64 _m64_dep_mi(const int v, __s64 s, const int p, const int len); +__s64 _m64_shrp(__s64 a, __s64 b, const int count); +__s64 _m64_popcnt(__s64 a); + +#define ia64_barrier() __memory_barrier() + +#define ia64_stop() /* Nothing: As of now stop bit is generated for each + * intrinsic + */ + +#define ia64_getreg __getReg +#define ia64_setreg __setReg + +#define ia64_hint(x) + +#define ia64_mux1_brcst 0 +#define ia64_mux1_mix 8 +#define ia64_mux1_shuf 9 +#define ia64_mux1_alt 10 +#define ia64_mux1_rev 11 + +#define ia64_mux1 _m64_mux1 +#define ia64_popcnt _m64_popcnt +#define ia64_getf_exp __getf_exp +#define ia64_shrp _m64_shrp + +#define ia64_tpa __tpa +#define ia64_invala __invala +#define ia64_invala_gr __invala_gr +#define ia64_invala_fr __invala_fr +#define ia64_nop __nop +#define ia64_sum __sum +#define ia64_ssm __ssm +#define ia64_rum __rum +#define ia64_rsm __rsm +#define ia64_fc __fc + +#define ia64_ldfs __ldfs +#define ia64_ldfd __ldfd +#define ia64_ldfe __ldfe +#define ia64_ldf8 __ldf8 +#define ia64_ldf_fill 
__ldf_fill + +#define ia64_stfs __stfs +#define ia64_stfd __stfd +#define ia64_stfe __stfe +#define ia64_stf8 __stf8 +#define ia64_stf_spill __stf_spill + +#define ia64_mf __mf +#define ia64_mfa __mfa + +#define ia64_fetchadd4_acq __fetchadd4_acq +#define ia64_fetchadd4_rel __fetchadd4_rel +#define ia64_fetchadd8_acq __fetchadd8_acq +#define ia64_fetchadd8_rel __fetchadd8_rel + +#define ia64_xchg1 _InterlockedExchange8 +#define ia64_xchg2 _InterlockedExchange16 +#define ia64_xchg4 _InterlockedExchange +#define ia64_xchg8 _InterlockedExchange64 + +#define ia64_cmpxchg1_rel _InterlockedCompareExchange8_rel +#define ia64_cmpxchg1_acq _InterlockedCompareExchange8_acq +#define ia64_cmpxchg2_rel _InterlockedCompareExchange16_rel +#define ia64_cmpxchg2_acq _InterlockedCompareExchange16_acq +#define ia64_cmpxchg4_rel _InterlockedCompareExchange_rel +#define ia64_cmpxchg4_acq _InterlockedCompareExchange_acq +#define ia64_cmpxchg8_rel _InterlockedCompareExchange64_rel +#define ia64_cmpxchg8_acq _InterlockedCompareExchange64_acq + +#define __ia64_set_dbr(index, val) \ + __setIndReg(_IA64_REG_INDR_DBR, index, val) +#define ia64_set_ibr(index, val) \ + __setIndReg(_IA64_REG_INDR_IBR, index, val) +#define ia64_set_pkr(index, val) \ + __setIndReg(_IA64_REG_INDR_PKR, index, val) +#define ia64_set_pmc(index, val) \ + __setIndReg(_IA64_REG_INDR_PMC, index, val) +#define ia64_set_pmd(index, val) \ + __setIndReg(_IA64_REG_INDR_PMD, index, val) +#define ia64_set_rr(index, val) \ + __setIndReg(_IA64_REG_INDR_RR, index, val) + +#define ia64_get_cpuid(index) __getIndReg(_IA64_REG_INDR_CPUID, index) +#define __ia64_get_dbr(index) __getIndReg(_IA64_REG_INDR_DBR, index) +#define ia64_get_ibr(index) __getIndReg(_IA64_REG_INDR_IBR, index) +#define ia64_get_pkr(index) __getIndReg(_IA64_REG_INDR_PKR, index) +#define ia64_get_pmc(index) __getIndReg(_IA64_REG_INDR_PMC, index) +#define ia64_get_pmd(index) __getIndReg(_IA64_REG_INDR_PMD, index) +#define ia64_get_rr(index) __getIndReg(_IA64_REG_INDR_RR, index) + +#define ia64_srlz_d __dsrlz +#define ia64_srlz_i __isrlz + +#define ia64_dv_serialize_data() +#define ia64_dv_serialize_instruction() + +#define ia64_st1_rel __st1_rel +#define ia64_st2_rel __st2_rel +#define ia64_st4_rel __st4_rel +#define ia64_st8_rel __st8_rel + +#define ia64_ld1_acq __ld1_acq +#define ia64_ld2_acq __ld2_acq +#define ia64_ld4_acq __ld4_acq +#define ia64_ld8_acq __ld8_acq + +#define ia64_sync_i __synci +#define ia64_thash __thash +#define ia64_ttag __ttag +#define ia64_itcd __itcd +#define ia64_itci __itci +#define ia64_itrd __itrd +#define ia64_itri __itri +#define ia64_ptce __ptce +#define ia64_ptcl __ptcl +#define ia64_ptcg __ptcg +#define ia64_ptcga __ptcga +#define ia64_ptri __ptri +#define ia64_ptrd __ptrd +#define ia64_dep_mi _m64_dep_mi + +/* Values for lfhint in __lfetch and __lfetch_fault */ + +#define ia64_lfhint_none 0 +#define ia64_lfhint_nt1 1 +#define ia64_lfhint_nt2 2 +#define ia64_lfhint_nta 3 + +#define ia64_lfetch __lfetch +#define ia64_lfetch_excl __lfetch_excl +#define ia64_lfetch_fault __lfetch_fault +#define ia64_lfetch_fault_excl __lfetch_fault_excl + +#define ia64_intrin_local_irq_restore(x) \ +do { \ + if ((x) != 0) { \ + ia64_ssm(IA64_PSR_I); \ + ia64_srlz_d(); \ + } else { \ + ia64_rsm(IA64_PSR_I); \ + } \ +} while (0) + +#endif /* _ASM_IA64_INTEL_INTRIN_H */ diff --git a/include/asm-ia64/intrinsics.h b/include/asm-ia64/intrinsics.h new file mode 100644 index 000000000000..8089f955e5d2 --- /dev/null +++ b/include/asm-ia64/intrinsics.h @@ -0,0 +1,181 @@ +#ifndef 
_ASM_IA64_INTRINSICS_H +#define _ASM_IA64_INTRINSICS_H + +/* + * Compiler-dependent intrinsics. + * + * Copyright (C) 2002-2003 Hewlett-Packard Co + * David Mosberger-Tang <davidm@hpl.hp.com> + */ + +#ifndef __ASSEMBLY__ +#include <linux/config.h> + +/* include compiler specific intrinsics */ +#include <asm/ia64regs.h> +#ifdef __INTEL_COMPILER +# include <asm/intel_intrin.h> +#else +# include <asm/gcc_intrin.h> +#endif + +/* + * Force an unresolved reference if someone tries to use + * ia64_fetch_and_add() with a bad value. + */ +extern unsigned long __bad_size_for_ia64_fetch_and_add (void); +extern unsigned long __bad_increment_for_ia64_fetch_and_add (void); + +#define IA64_FETCHADD(tmp,v,n,sz,sem) \ +({ \ + switch (sz) { \ + case 4: \ + tmp = ia64_fetchadd4_##sem((unsigned int *) v, n); \ + break; \ + \ + case 8: \ + tmp = ia64_fetchadd8_##sem((unsigned long *) v, n); \ + break; \ + \ + default: \ + __bad_size_for_ia64_fetch_and_add(); \ + } \ +}) + +#define ia64_fetchadd(i,v,sem) \ +({ \ + __u64 _tmp; \ + volatile __typeof__(*(v)) *_v = (v); \ + /* Can't use a switch () here: gcc isn't always smart enough for that... */ \ + if ((i) == -16) \ + IA64_FETCHADD(_tmp, _v, -16, sizeof(*(v)), sem); \ + else if ((i) == -8) \ + IA64_FETCHADD(_tmp, _v, -8, sizeof(*(v)), sem); \ + else if ((i) == -4) \ + IA64_FETCHADD(_tmp, _v, -4, sizeof(*(v)), sem); \ + else if ((i) == -1) \ + IA64_FETCHADD(_tmp, _v, -1, sizeof(*(v)), sem); \ + else if ((i) == 1) \ + IA64_FETCHADD(_tmp, _v, 1, sizeof(*(v)), sem); \ + else if ((i) == 4) \ + IA64_FETCHADD(_tmp, _v, 4, sizeof(*(v)), sem); \ + else if ((i) == 8) \ + IA64_FETCHADD(_tmp, _v, 8, sizeof(*(v)), sem); \ + else if ((i) == 16) \ + IA64_FETCHADD(_tmp, _v, 16, sizeof(*(v)), sem); \ + else \ + _tmp = __bad_increment_for_ia64_fetch_and_add(); \ + (__typeof__(*(v))) (_tmp); /* return old value */ \ +}) + +#define ia64_fetch_and_add(i,v) (ia64_fetchadd(i, v, rel) + (i)) /* return new value */ + +/* + * This function doesn't exist, so you'll get a linker error if + * something tries to do an invalid xchg(). + */ +extern void ia64_xchg_called_with_bad_pointer (void); + +#define __xchg(x,ptr,size) \ +({ \ + unsigned long __xchg_result; \ + \ + switch (size) { \ + case 1: \ + __xchg_result = ia64_xchg1((__u8 *)ptr, x); \ + break; \ + \ + case 2: \ + __xchg_result = ia64_xchg2((__u16 *)ptr, x); \ + break; \ + \ + case 4: \ + __xchg_result = ia64_xchg4((__u32 *)ptr, x); \ + break; \ + \ + case 8: \ + __xchg_result = ia64_xchg8((__u64 *)ptr, x); \ + break; \ + default: \ + ia64_xchg_called_with_bad_pointer(); \ + } \ + __xchg_result; \ +}) + +#define xchg(ptr,x) \ + ((__typeof__(*(ptr))) __xchg ((unsigned long) (x), (ptr), sizeof(*(ptr)))) + +/* + * Atomic compare and exchange. Compare OLD with MEM, if identical, + * store NEW in MEM. Return the initial value in MEM. Success is + * indicated by comparing RETURN with OLD. + */ + +#define __HAVE_ARCH_CMPXCHG 1 + +/* + * This function doesn't exist, so you'll get a linker error + * if something tries to do an invalid cmpxchg(). 
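+ *
+ * Usage sketch (added for illustration, not in the original source):
+ * callers detect success by comparing the return value with the old value,
+ * e.g. an atomic increment:
+ *
+ *	prev = *p;
+ *	do {
+ *		old = prev;
+ *		prev = cmpxchg_acq(p, old, old + 1);
+ *	} while (prev != old);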
+ */ +extern long ia64_cmpxchg_called_with_bad_pointer (void); + +#define ia64_cmpxchg(sem,ptr,old,new,size) \ +({ \ + __u64 _o_, _r_; \ + \ + switch (size) { \ + case 1: _o_ = (__u8 ) (long) (old); break; \ + case 2: _o_ = (__u16) (long) (old); break; \ + case 4: _o_ = (__u32) (long) (old); break; \ + case 8: _o_ = (__u64) (long) (old); break; \ + default: break; \ + } \ + switch (size) { \ + case 1: \ + _r_ = ia64_cmpxchg1_##sem((__u8 *) ptr, new, _o_); \ + break; \ + \ + case 2: \ + _r_ = ia64_cmpxchg2_##sem((__u16 *) ptr, new, _o_); \ + break; \ + \ + case 4: \ + _r_ = ia64_cmpxchg4_##sem((__u32 *) ptr, new, _o_); \ + break; \ + \ + case 8: \ + _r_ = ia64_cmpxchg8_##sem((__u64 *) ptr, new, _o_); \ + break; \ + \ + default: \ + _r_ = ia64_cmpxchg_called_with_bad_pointer(); \ + break; \ + } \ + (__typeof__(old)) _r_; \ +}) + +#define cmpxchg_acq(ptr,o,n) ia64_cmpxchg(acq, (ptr), (o), (n), sizeof(*(ptr))) +#define cmpxchg_rel(ptr,o,n) ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr))) + +/* for compatibility with other platforms: */ +#define cmpxchg(ptr,o,n) cmpxchg_acq(ptr,o,n) + +#ifdef CONFIG_IA64_DEBUG_CMPXCHG +# define CMPXCHG_BUGCHECK_DECL int _cmpxchg_bugcheck_count = 128; +# define CMPXCHG_BUGCHECK(v) \ + do { \ + if (_cmpxchg_bugcheck_count-- <= 0) { \ + void *ip; \ + extern int printk(const char *fmt, ...); \ + ip = (void *) ia64_getreg(_IA64_REG_IP); \ + printk("CMPXCHG_BUGCHECK: stuck at %p on word %p\n", ip, (v)); \ + break; \ + } \ + } while (0) +#else /* !CONFIG_IA64_DEBUG_CMPXCHG */ +# define CMPXCHG_BUGCHECK_DECL +# define CMPXCHG_BUGCHECK(v) +#endif /* !CONFIG_IA64_DEBUG_CMPXCHG */ + +#endif +#endif /* _ASM_IA64_INTRINSICS_H */ diff --git a/include/asm-ia64/io.h b/include/asm-ia64/io.h new file mode 100644 index 000000000000..491e9d1fc538 --- /dev/null +++ b/include/asm-ia64/io.h @@ -0,0 +1,484 @@ +#ifndef _ASM_IA64_IO_H +#define _ASM_IA64_IO_H + +/* + * This file contains the definitions for the emulated IO instructions + * inb/inw/inl/outb/outw/outl and the "string versions" of the same + * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing" + * versions of the single-IO instructions (inb_p/inw_p/..). + * + * This file is not meant to be obfuscating: it's just complicated to + * (a) handle it all in a way that makes gcc able to optimize it as + * well as possible and (b) trying to avoid writing the same thing + * over and over again with slight variations and possibly making a + * mistake somewhere. + * + * Copyright (C) 1998-2003 Hewlett-Packard Co + * David Mosberger-Tang <davidm@hpl.hp.com> + * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com> + * Copyright (C) 1999 Don Dugger <don.dugger@intel.com> + */ + +/* We don't use IO slowdowns on the ia64, but.. */ +#define __SLOW_DOWN_IO do { } while (0) +#define SLOW_DOWN_IO do { } while (0) + +#define __IA64_UNCACHED_OFFSET 0xc000000000000000UL /* region 6 */ + +/* + * The legacy I/O space defined by the ia64 architecture supports only 65536 ports, but + * large machines may have multiple other I/O spaces so we can't place any a priori limit + * on IO_SPACE_LIMIT. These additional spaces are described in ACPI. 
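+ *
+ * Worked example (illustrative): with IO_SPACE_BITS == 24 below, a port
+ * cookie of 0x1000100 decodes via IO_SPACE_NR()/IO_SPACE_PORT() to space
+ * number 1 and space-local port 0x100.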
+ */ +#define IO_SPACE_LIMIT 0xffffffffffffffffUL + +#define MAX_IO_SPACES_BITS 4 +#define MAX_IO_SPACES (1UL << MAX_IO_SPACES_BITS) +#define IO_SPACE_BITS 24 +#define IO_SPACE_SIZE (1UL << IO_SPACE_BITS) + +#define IO_SPACE_NR(port) ((port) >> IO_SPACE_BITS) +#define IO_SPACE_BASE(space) ((space) << IO_SPACE_BITS) +#define IO_SPACE_PORT(port) ((port) & (IO_SPACE_SIZE - 1)) + +#define IO_SPACE_SPARSE_ENCODING(p) ((((p) >> 2) << 12) | ((p) & 0xfff)) + +struct io_space { + unsigned long mmio_base; /* base in MMIO space */ + int sparse; +}; + +extern struct io_space io_space[]; +extern unsigned int num_io_spaces; + +# ifdef __KERNEL__ + +/* + * All MMIO iomem cookies are in region 6; anything less is a PIO cookie: + * 0xCxxxxxxxxxxxxxxx MMIO cookie (return from ioremap) + * 0x000000001SPPPPPP PIO cookie (S=space number, P..P=port) + * + * ioread/writeX() uses the leading 1 in PIO cookies (PIO_OFFSET) to catch + * code that uses bare port numbers without the prerequisite pci_iomap(). + */ +#define PIO_OFFSET (1UL << (MAX_IO_SPACES_BITS + IO_SPACE_BITS)) +#define PIO_MASK (PIO_OFFSET - 1) +#define PIO_RESERVED __IA64_UNCACHED_OFFSET +#define HAVE_ARCH_PIO_SIZE + +#include <asm/intrinsics.h> +#include <asm/machvec.h> +#include <asm/page.h> +#include <asm/system.h> +#include <asm-generic/iomap.h> + +/* + * Change virtual addresses to physical addresses and vice versa. + */ +static inline unsigned long +virt_to_phys (volatile void *address) +{ + return (unsigned long) address - PAGE_OFFSET; +} + +static inline void* +phys_to_virt (unsigned long address) +{ + return (void *) (address + PAGE_OFFSET); +} + +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE +extern int valid_phys_addr_range (unsigned long addr, size_t *count); /* efi.c */ + +/* + * The following two macros are deprecated and scheduled for removal. + * Please use the PCI-DMA interface defined in <asm/pci.h> instead. + */ +#define bus_to_virt phys_to_virt +#define virt_to_bus virt_to_phys +#define page_to_bus page_to_phys + +# endif /* __KERNEL__ */ + +/* + * Memory fence w/accept. This should never be used in code that is + * not IA-64 specific. + */ +#define __ia64_mf_a() ia64_mfa() + +/** + * ___ia64_mmiowb - I/O write barrier + * + * Ensure ordering of I/O space writes. This will make sure that writes + * following the barrier will arrive after all previous writes. For most + * ia64 platforms, this is a simple 'mf.a' instruction. + * + * See Documentation/DocBook/deviceiobook.tmpl for more information.
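+ *
+ * Hypothetical driver pattern (identifiers invented for illustration):
+ *
+ *	spin_lock(&dev->lock);
+ *	writel(cmd, dev->ctrl_reg);
+ *	mmiowb();		(order the write before the unlock)
+ *	spin_unlock(&dev->lock);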
+ */ +static inline void ___ia64_mmiowb(void) +{ + ia64_mfa(); +} + +static inline const unsigned long +__ia64_get_io_port_base (void) +{ + extern unsigned long ia64_iobase; + + return ia64_iobase; +} + +static inline void* +__ia64_mk_io_addr (unsigned long port) +{ + struct io_space *space; + unsigned long offset; + + space = &io_space[IO_SPACE_NR(port)]; + port = IO_SPACE_PORT(port); + if (space->sparse) + offset = IO_SPACE_SPARSE_ENCODING(port); + else + offset = port; + + return (void *) (space->mmio_base | offset); +} + +#define __ia64_inb ___ia64_inb +#define __ia64_inw ___ia64_inw +#define __ia64_inl ___ia64_inl +#define __ia64_outb ___ia64_outb +#define __ia64_outw ___ia64_outw +#define __ia64_outl ___ia64_outl +#define __ia64_readb ___ia64_readb +#define __ia64_readw ___ia64_readw +#define __ia64_readl ___ia64_readl +#define __ia64_readq ___ia64_readq +#define __ia64_readb_relaxed ___ia64_readb +#define __ia64_readw_relaxed ___ia64_readw +#define __ia64_readl_relaxed ___ia64_readl +#define __ia64_readq_relaxed ___ia64_readq +#define __ia64_writeb ___ia64_writeb +#define __ia64_writew ___ia64_writew +#define __ia64_writel ___ia64_writel +#define __ia64_writeq ___ia64_writeq +#define __ia64_mmiowb ___ia64_mmiowb + +/* + * For the in/out routines, we need to do "mf.a" _after_ doing the I/O access to ensure + * that the access has completed before executing other I/O accesses. Since we're doing + * the accesses through an uncachable (UC) translation, the CPU will execute them in + * program order. However, we still need to tell the compiler not to shuffle them around + * during optimization, which is why we use "volatile" pointers. + */ + +static inline unsigned int +___ia64_inb (unsigned long port) +{ + volatile unsigned char *addr = __ia64_mk_io_addr(port); + unsigned char ret; + + ret = *addr; + __ia64_mf_a(); + return ret; +} + +static inline unsigned int +___ia64_inw (unsigned long port) +{ + volatile unsigned short *addr = __ia64_mk_io_addr(port); + unsigned short ret; + + ret = *addr; + __ia64_mf_a(); + return ret; +} + +static inline unsigned int +___ia64_inl (unsigned long port) +{ + volatile unsigned int *addr = __ia64_mk_io_addr(port); + unsigned int ret; + + ret = *addr; + __ia64_mf_a(); + return ret; +} + +static inline void +___ia64_outb (unsigned char val, unsigned long port) +{ + volatile unsigned char *addr = __ia64_mk_io_addr(port); + + *addr = val; + __ia64_mf_a(); +} + +static inline void +___ia64_outw (unsigned short val, unsigned long port) +{ + volatile unsigned short *addr = __ia64_mk_io_addr(port); + + *addr = val; + __ia64_mf_a(); +} + +static inline void +___ia64_outl (unsigned int val, unsigned long port) +{ + volatile unsigned int *addr = __ia64_mk_io_addr(port); + + *addr = val; + __ia64_mf_a(); +} + +static inline void +__insb (unsigned long port, void *dst, unsigned long count) +{ + unsigned char *dp = dst; + + while (count--) + *dp++ = platform_inb(port); +} + +static inline void +__insw (unsigned long port, void *dst, unsigned long count) +{ + unsigned short *dp = dst; + + while (count--) + *dp++ = platform_inw(port); +} + +static inline void +__insl (unsigned long port, void *dst, unsigned long count) +{ + unsigned int *dp = dst; + + while (count--) + *dp++ = platform_inl(port); +} + +static inline void +__outsb (unsigned long port, const void *src, unsigned long count) +{ + const unsigned char *sp = src; + + while (count--) + platform_outb(*sp++, port); +} + +static inline void +__outsw (unsigned long port, const void *src, unsigned long count) +{ + 
const unsigned short *sp = src; + + while (count--) + platform_outw(*sp++, port); +} + +static inline void +__outsl (unsigned long port, const void *src, unsigned long count) +{ + const unsigned int *sp = src; + + while (count--) + platform_outl(*sp++, port); +} + +/* + * Unfortunately, some platforms are broken and do not follow the IA-64 architecture + * specification regarding legacy I/O support. Thus, we have to make these operations + * platform dependent... + */ +#define __inb platform_inb +#define __inw platform_inw +#define __inl platform_inl +#define __outb platform_outb +#define __outw platform_outw +#define __outl platform_outl +#define __mmiowb platform_mmiowb + +#define inb(p) __inb(p) +#define inw(p) __inw(p) +#define inl(p) __inl(p) +#define insb(p,d,c) __insb(p,d,c) +#define insw(p,d,c) __insw(p,d,c) +#define insl(p,d,c) __insl(p,d,c) +#define outb(v,p) __outb(v,p) +#define outw(v,p) __outw(v,p) +#define outl(v,p) __outl(v,p) +#define outsb(p,s,c) __outsb(p,s,c) +#define outsw(p,s,c) __outsw(p,s,c) +#define outsl(p,s,c) __outsl(p,s,c) +#define mmiowb() __mmiowb() + +/* + * The addresses passed to these functions are ioremap()ped already. + * + * We need these to be machine vectors since some platforms don't provide + * DMA coherence via PIO reads (PCI drivers and the spec imply that this is + * a good idea). Writes are ok though for all existing ia64 platforms (and + * hopefully it'll stay that way). + */ +static inline unsigned char +___ia64_readb (const volatile void __iomem *addr) +{ + return *(volatile unsigned char __force *)addr; +} + +static inline unsigned short +___ia64_readw (const volatile void __iomem *addr) +{ + return *(volatile unsigned short __force *)addr; +} + +static inline unsigned int +___ia64_readl (const volatile void __iomem *addr) +{ + return *(volatile unsigned int __force *) addr; +} + +static inline unsigned long +___ia64_readq (const volatile void __iomem *addr) +{ + return *(volatile unsigned long __force *) addr; +} + +static inline void +__writeb (unsigned char val, volatile void __iomem *addr) +{ + *(volatile unsigned char __force *) addr = val; +} + +static inline void +__writew (unsigned short val, volatile void __iomem *addr) +{ + *(volatile unsigned short __force *) addr = val; +} + +static inline void +__writel (unsigned int val, volatile void __iomem *addr) +{ + *(volatile unsigned int __force *) addr = val; +} + +static inline void +__writeq (unsigned long val, volatile void __iomem *addr) +{ + *(volatile unsigned long __force *) addr = val; +} + +#define __readb platform_readb +#define __readw platform_readw +#define __readl platform_readl +#define __readq platform_readq +#define __readb_relaxed platform_readb_relaxed +#define __readw_relaxed platform_readw_relaxed +#define __readl_relaxed platform_readl_relaxed +#define __readq_relaxed platform_readq_relaxed + +#define readb(a) __readb((a)) +#define readw(a) __readw((a)) +#define readl(a) __readl((a)) +#define readq(a) __readq((a)) +#define readb_relaxed(a) __readb_relaxed((a)) +#define readw_relaxed(a) __readw_relaxed((a)) +#define readl_relaxed(a) __readl_relaxed((a)) +#define readq_relaxed(a) __readq_relaxed((a)) +#define __raw_readb readb +#define __raw_readw readw +#define __raw_readl readl +#define __raw_readq readq +#define __raw_readb_relaxed readb_relaxed +#define __raw_readw_relaxed readw_relaxed +#define __raw_readl_relaxed readl_relaxed +#define __raw_readq_relaxed readq_relaxed +#define writeb(v,a) __writeb((v), (a)) +#define writew(v,a) __writew((v), (a)) +#define
writel(v,a) __writel((v), (a)) +#define writeq(v,a) __writeq((v), (a)) +#define __raw_writeb writeb +#define __raw_writew writew +#define __raw_writel writel +#define __raw_writeq writeq + +#ifndef inb_p +# define inb_p inb +#endif +#ifndef inw_p +# define inw_p inw +#endif +#ifndef inl_p +# define inl_p inl +#endif + +#ifndef outb_p +# define outb_p outb +#endif +#ifndef outw_p +# define outw_p outw +#endif +#ifndef outl_p +# define outl_p outl +#endif + +/* + * An "address" in IO memory space is not clearly either an integer or a pointer. We will + * accept both, thus the casts. + * + * On ia-64, we access the physical I/O memory space through the uncached kernel region. + */ +static inline void __iomem * +ioremap (unsigned long offset, unsigned long size) +{ + return (void __iomem *) (__IA64_UNCACHED_OFFSET | (offset)); +} + +static inline void +iounmap (volatile void __iomem *addr) +{ +} + +#define ioremap_nocache(o,s) ioremap(o,s) + +# ifdef __KERNEL__ + +/* + * String version of IO memory access ops: + */ +extern void memcpy_fromio(void *dst, const volatile void __iomem *src, long n); +extern void memcpy_toio(volatile void __iomem *dst, const void *src, long n); +extern void memset_io(volatile void __iomem *s, int c, long n); + +#define dma_cache_inv(_start,_size) do { } while (0) +#define dma_cache_wback(_start,_size) do { } while (0) +#define dma_cache_wback_inv(_start,_size) do { } while (0) + +# endif /* __KERNEL__ */ + +/* + * Enabling BIO_VMERGE_BOUNDARY forces us to turn off I/O MMU bypassing. It is said that + * BIO-level virtual merging can give up to 4% performance boost (not verified for ia64). + * On the other hand, we know that I/O MMU bypassing gives ~8% performance improvement on + * SPECweb-like workloads on zx1-based machines. Thus, for now we favor I/O MMU bypassing + * over BIO-level virtual merging. + */ +extern unsigned long ia64_max_iommu_merge_mask; +#if 1 +#define BIO_VMERGE_BOUNDARY 0 +#else +/* + * It makes no sense at all to have this BIO_VMERGE_BOUNDARY macro here. Should be + * replaced by dma_merge_mask() or something of that sort. Note: the only way + * BIO_VMERGE_BOUNDARY is used is to mask off bits. Effectively, our definition gets + * expanded into: + * + * addr & ((ia64_max_iommu_merge_mask + 1) - 1) == (addr & ia64_max_iommu_vmerge_mask) + * + * which is precisely what we want. + */ +#define BIO_VMERGE_BOUNDARY (ia64_max_iommu_merge_mask + 1) +#endif + +#endif /* _ASM_IA64_IO_H */ diff --git a/include/asm-ia64/ioctl.h b/include/asm-ia64/ioctl.h new file mode 100644 index 000000000000..be9cc2403d2a --- /dev/null +++ b/include/asm-ia64/ioctl.h @@ -0,0 +1,77 @@ +#ifndef _ASM_IA64_IOCTL_H +#define _ASM_IA64_IOCTL_H + +/* + * Based on <asm-i386/ioctl.h>. + * + * Modified 1998, 1999 + * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co + */ + +/* ioctl command encoding: 32 bits total, command in lower 16 bits, + * size of the parameter structure in the lower 14 bits of the + * upper 16 bits. + * Encoding the size of the parameter structure in the ioctl request + * is useful for catching programs compiled with old versions + * and to avoid overwriting user space outside the user buffer area. + * The highest 2 bits are reserved for indicating the ``access mode''. + * NOTE: This limits the max parameter size to 16kB -1 ! + */ + +/* + * The following is for compatibility across the various Linux + * platforms. The ia64 ioctl numbering scheme doesn't really enforce + * a type field. 
De facto, however, the top 8 bits of the lower 16 + * bits are indeed used as a type field, so we might just as well make + * this explicit here. Please be sure to use the decoding macros + * below from now on. + */ +#define _IOC_NRBITS 8 +#define _IOC_TYPEBITS 8 +#define _IOC_SIZEBITS 14 +#define _IOC_DIRBITS 2 + +#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1) +#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1) +#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1) +#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1) + +#define _IOC_NRSHIFT 0 +#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS) +#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS) +#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS) + +/* + * Direction bits. + */ +#define _IOC_NONE 0U +#define _IOC_WRITE 1U +#define _IOC_READ 2U + +#define _IOC(dir,type,nr,size) \ + (((dir) << _IOC_DIRSHIFT) | \ + ((type) << _IOC_TYPESHIFT) | \ + ((nr) << _IOC_NRSHIFT) | \ + ((size) << _IOC_SIZESHIFT)) + +/* used to create numbers */ +#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0) +#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size)) +#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size)) +#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size)) + +/* used to decode ioctl numbers.. */ +#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK) +#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK) +#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK) +#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK) + +/* ...and for the drivers/sound files... */ + +#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT) +#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT) +#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT) +#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT) +#define IOCSIZE_SHIFT (_IOC_SIZESHIFT) + +#endif /* _ASM_IA64_IOCTL_H */ diff --git a/include/asm-ia64/ioctl32.h b/include/asm-ia64/ioctl32.h new file mode 100644 index 000000000000..d0d227f45e05 --- /dev/null +++ b/include/asm-ia64/ioctl32.h @@ -0,0 +1 @@ +#include <linux/ioctl32.h> diff --git a/include/asm-ia64/ioctls.h b/include/asm-ia64/ioctls.h new file mode 100644 index 000000000000..31ee521aeb7a --- /dev/null +++ b/include/asm-ia64/ioctls.h @@ -0,0 +1,89 @@ +#ifndef _ASM_IA64_IOCTLS_H +#define _ASM_IA64_IOCTLS_H + +/* + * Based on <asm-i386/ioctls.h> + * + * Modified 1998, 1999, 2002 + * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co + */ + +#include <asm/ioctl.h> + +/* 0x54 is just a magic number to make these relatively unique ('T') */ + +#define TCGETS 0x5401 +#define TCSETS 0x5402 /* Clashes with SNDCTL_TMR_START sound ioctl */ +#define TCSETSW 0x5403 +#define TCSETSF 0x5404 +#define TCGETA 0x5405 +#define TCSETA 0x5406 +#define TCSETAW 0x5407 +#define TCSETAF 0x5408 +#define TCSBRK 0x5409 +#define TCXONC 0x540A +#define TCFLSH 0x540B +#define TIOCEXCL 0x540C +#define TIOCNXCL 0x540D +#define TIOCSCTTY 0x540E +#define TIOCGPGRP 0x540F +#define TIOCSPGRP 0x5410 +#define TIOCOUTQ 0x5411 +#define TIOCSTI 0x5412 +#define TIOCGWINSZ 0x5413 +#define TIOCSWINSZ 0x5414 +#define TIOCMGET 0x5415 +#define TIOCMBIS 0x5416 +#define TIOCMBIC 0x5417 +#define TIOCMSET 0x5418 +#define TIOCGSOFTCAR 0x5419 +#define TIOCSSOFTCAR 0x541A +#define FIONREAD 0x541B +#define TIOCINQ FIONREAD +#define TIOCLINUX 0x541C +#define TIOCCONS 0x541D +#define TIOCGSERIAL 0x541E +#define TIOCSSERIAL 0x541F +#define TIOCPKT 0x5420 +#define FIONBIO 0x5421 +#define TIOCNOTTY 0x5422 +#define TIOCSETD 0x5423 +#define TIOCGETD 0x5424 
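+/*
+ * (Editor's note, not in the original header: the 0x54xx values above are
+ * historical bare constants, while newer entries such as TIOCGPTN below use
+ * the <asm/ioctl.h> encoding macros.  As a worked example, assuming a 4-byte
+ * unsigned int:
+ *
+ *	_IOR('T', 0x30, unsigned int)
+ *	  == (_IOC_READ << 30) | ('T' << 8) | (0x30 << 0) | (4 << 16)
+ *	  == 0x80045430
+ *
+ * and _IOC_SIZE(0x80045430) then recovers the 4-byte payload size.)
+ */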
+#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */ +#define TIOCSBRK 0x5427 /* BSD compatibility */ +#define TIOCCBRK 0x5428 /* BSD compatibility */ +#define TIOCGSID 0x5429 /* Return the session ID of FD */ +#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ +#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ + +#define FIONCLEX 0x5450 /* these numbers need to be adjusted. */ +#define FIOCLEX 0x5451 +#define FIOASYNC 0x5452 +#define TIOCSERCONFIG 0x5453 +#define TIOCSERGWILD 0x5454 +#define TIOCSERSWILD 0x5455 +#define TIOCGLCKTRMIOS 0x5456 +#define TIOCSLCKTRMIOS 0x5457 +#define TIOCSERGSTRUCT 0x5458 /* For debugging only */ +#define TIOCSERGETLSR 0x5459 /* Get line status register */ +#define TIOCSERGETMULTI 0x545A /* Get multiport config */ +#define TIOCSERSETMULTI 0x545B /* Set multiport config */ + +#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */ +#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ +#define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */ +#define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */ +#define FIOQSIZE 0x5460 + +/* Used for packet mode */ +#define TIOCPKT_DATA 0 +#define TIOCPKT_FLUSHREAD 1 +#define TIOCPKT_FLUSHWRITE 2 +#define TIOCPKT_STOP 4 +#define TIOCPKT_START 8 +#define TIOCPKT_NOSTOP 16 +#define TIOCPKT_DOSTOP 32 + +#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ + +#endif /* _ASM_IA64_IOCTLS_H */ diff --git a/include/asm-ia64/iosapic.h b/include/asm-ia64/iosapic.h new file mode 100644 index 000000000000..38a7a72791cc --- /dev/null +++ b/include/asm-ia64/iosapic.h @@ -0,0 +1,110 @@ +#ifndef __ASM_IA64_IOSAPIC_H +#define __ASM_IA64_IOSAPIC_H + +#define IOSAPIC_REG_SELECT 0x0 +#define IOSAPIC_WINDOW 0x10 +#define IOSAPIC_EOI 0x40 + +#define IOSAPIC_VERSION 0x1 + +/* + * Redirection table entry + */ +#define IOSAPIC_RTE_LOW(i) (0x10+i*2) +#define IOSAPIC_RTE_HIGH(i) (0x11+i*2) + +#define IOSAPIC_DEST_SHIFT 16 + +/* + * Delivery mode + */ +#define IOSAPIC_DELIVERY_SHIFT 8 +#define IOSAPIC_FIXED 0x0 +#define IOSAPIC_LOWEST_PRIORITY 0x1 +#define IOSAPIC_PMI 0x2 +#define IOSAPIC_NMI 0x4 +#define IOSAPIC_INIT 0x5 +#define IOSAPIC_EXTINT 0x7 + +/* + * Interrupt polarity + */ +#define IOSAPIC_POLARITY_SHIFT 13 +#define IOSAPIC_POL_HIGH 0 +#define IOSAPIC_POL_LOW 1 + +/* + * Trigger mode + */ +#define IOSAPIC_TRIGGER_SHIFT 15 +#define IOSAPIC_EDGE 0 +#define IOSAPIC_LEVEL 1 + +/* + * Mask bit + */ + +#define IOSAPIC_MASK_SHIFT 16 +#define IOSAPIC_MASK (1<<IOSAPIC_MASK_SHIFT) + +#ifndef __ASSEMBLY__ + +#ifdef CONFIG_IOSAPIC + +#define NR_IOSAPICS 256 + +static inline unsigned int iosapic_read(char __iomem *iosapic, unsigned int reg) +{ + writel(reg, iosapic + IOSAPIC_REG_SELECT); + return readl(iosapic + IOSAPIC_WINDOW); +} + +static inline void iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val) +{ + writel(reg, iosapic + IOSAPIC_REG_SELECT); + writel(val, iosapic + IOSAPIC_WINDOW); +} + +static inline void iosapic_eoi(char __iomem *iosapic, u32 vector) +{ + writel(vector, iosapic + IOSAPIC_EOI); +} + +extern void __init iosapic_system_init (int pcat_compat); +extern void __init iosapic_init (unsigned long address, + unsigned int gsi_base); +extern int gsi_to_vector (unsigned int gsi); +extern int gsi_to_irq (unsigned int gsi); +extern void iosapic_enable_intr (unsigned int vector); +extern int iosapic_register_intr (unsigned int gsi, unsigned long polarity, + unsigned long trigger); +#ifdef CONFIG_ACPI_DEALLOCATE_IRQ 
+extern void iosapic_unregister_intr (unsigned int irq); +#endif +extern void __init iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi, + unsigned long polarity, + unsigned long trigger); +extern int __init iosapic_register_platform_intr (u32 int_type, + unsigned int gsi, + int pmi_vector, + u16 eid, u16 id, + unsigned long polarity, + unsigned long trigger); +extern unsigned int iosapic_version (char __iomem *addr); + +extern void iosapic_pci_fixup (int); +#ifdef CONFIG_NUMA +extern void __init map_iosapic_to_node (unsigned int, int); +#endif +#else +#define iosapic_system_init(pcat_compat) do { } while (0) +#define iosapic_init(address,gsi_base) do { } while (0) +#define iosapic_register_intr(gsi,polarity,trigger) (gsi) +#define iosapic_unregister_intr(irq) do { } while (0) +#define iosapic_override_isa_irq(isa_irq,gsi,polarity,trigger) do { } while (0) +#define iosapic_register_platform_intr(type,gsi,pmi,eid,id, \ + polarity,trigger) (gsi) +#endif + +# endif /* !__ASSEMBLY__ */ +#endif /* __ASM_IA64_IOSAPIC_H */ diff --git a/include/asm-ia64/ipcbuf.h b/include/asm-ia64/ipcbuf.h new file mode 100644 index 000000000000..079899ae7d32 --- /dev/null +++ b/include/asm-ia64/ipcbuf.h @@ -0,0 +1,28 @@ +#ifndef _ASM_IA64_IPCBUF_H +#define _ASM_IA64_IPCBUF_H + +/* + * The ipc64_perm structure for IA-64 architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. + * + * Pad space is left for: + * - 32-bit seq + * - 2 miscellaneous 64-bit values + */ + +struct ipc64_perm +{ + __kernel_key_t key; + __kernel_uid_t uid; + __kernel_gid_t gid; + __kernel_uid_t cuid; + __kernel_gid_t cgid; + __kernel_mode_t mode; + unsigned short seq; + unsigned short __pad1; + unsigned long __unused1; + unsigned long __unused2; +}; + +#endif /* _ASM_IA64_IPCBUF_H */ diff --git a/include/asm-ia64/irq.h b/include/asm-ia64/irq.h new file mode 100644 index 000000000000..bd07d11d9f37 --- /dev/null +++ b/include/asm-ia64/irq.h @@ -0,0 +1,43 @@ +#ifndef _ASM_IA64_IRQ_H +#define _ASM_IA64_IRQ_H + +/* + * Copyright (C) 1999-2000, 2002 Hewlett-Packard Co + * David Mosberger-Tang <davidm@hpl.hp.com> + * Stephane Eranian <eranian@hpl.hp.com> + * + * 11/24/98 S.Eranian updated TIMER_IRQ and irq_canonicalize + * 01/20/99 S.Eranian added keyboard interrupt + * 02/29/00 D.Mosberger moved most things into hw_irq.h + */ + +#define NR_IRQS 256 +#define NR_IRQ_VECTORS NR_IRQS + +static __inline__ int +irq_canonicalize (int irq) +{ + /* + * We do the legacy thing here of pretending that irqs < 16 + * are 8259 irqs. This really shouldn't be necessary at all, + * but we keep it here as serial.c still uses it... + */ + return ((irq == 2) ? 
9 : irq); +} + +extern void disable_irq (unsigned int); +extern void disable_irq_nosync (unsigned int); +extern void enable_irq (unsigned int); +extern void set_irq_affinity_info (unsigned int irq, int dest, int redir); + +#ifdef CONFIG_SMP +extern void move_irq(int irq); +#else +#define move_irq(irq) +#endif + +struct irqaction; +struct pt_regs; +int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *); + +#endif /* _ASM_IA64_IRQ_H */ diff --git a/include/asm-ia64/kmap_types.h b/include/asm-ia64/kmap_types.h new file mode 100644 index 000000000000..bc777525fa12 --- /dev/null +++ b/include/asm-ia64/kmap_types.h @@ -0,0 +1,31 @@ +#ifndef _ASM_IA64_KMAP_TYPES_H +#define _ASM_IA64_KMAP_TYPES_H + +#include <linux/config.h> + +#ifdef CONFIG_DEBUG_HIGHMEM +# define D(n) __KM_FENCE_##n , +#else +# define D(n) +#endif + +enum km_type { +D(0) KM_BOUNCE_READ, +D(1) KM_SKB_SUNRPC_DATA, +D(2) KM_SKB_DATA_SOFTIRQ, +D(3) KM_USER0, +D(4) KM_USER1, +D(5) KM_BIO_SRC_IRQ, +D(6) KM_BIO_DST_IRQ, +D(7) KM_PTE0, +D(8) KM_PTE1, +D(9) KM_IRQ0, +D(10) KM_IRQ1, +D(11) KM_SOFTIRQ0, +D(12) KM_SOFTIRQ1, +D(13) KM_TYPE_NR +}; + +#undef D + +#endif /* _ASM_IA64_KMAP_TYPES_H */ diff --git a/include/asm-ia64/kregs.h b/include/asm-ia64/kregs.h new file mode 100644 index 000000000000..221b5cb564b2 --- /dev/null +++ b/include/asm-ia64/kregs.h @@ -0,0 +1,163 @@ +#ifndef _ASM_IA64_KREGS_H +#define _ASM_IA64_KREGS_H + +/* + * Copyright (C) 2001-2002 Hewlett-Packard Co + * David Mosberger-Tang <davidm@hpl.hp.com> + */ +/* + * This file defines the kernel register usage convention used by Linux/ia64. + */ + +/* + * Kernel registers: + */ +#define IA64_KR_IO_BASE 0 /* ar.k0: legacy I/O base address */ +#define IA64_KR_TSSD 1 /* ar.k1: IVE uses this as the TSSD */ +#define IA64_KR_PER_CPU_DATA 3 /* ar.k3: physical per-CPU base */ +#define IA64_KR_CURRENT_STACK 4 /* ar.k4: what's mapped in IA64_TR_CURRENT_STACK */ +#define IA64_KR_FPU_OWNER 5 /* ar.k5: fpu-owner (UP only, at the moment) */ +#define IA64_KR_CURRENT 6 /* ar.k6: "current" task pointer */ +#define IA64_KR_PT_BASE 7 /* ar.k7: page table base address (physical) */ + +#define _IA64_KR_PASTE(x,y) x##y +#define _IA64_KR_PREFIX(n) _IA64_KR_PASTE(ar.k, n) +#define IA64_KR(n) _IA64_KR_PREFIX(IA64_KR_##n) + +/* + * Translation registers: + */ +#define IA64_TR_KERNEL 0 /* itr0, dtr0: maps kernel image (code & data) */ +#define IA64_TR_PALCODE 1 /* itr1: maps PALcode as required by EFI */ +#define IA64_TR_PERCPU_DATA 1 /* dtr1: percpu data */ +#define IA64_TR_CURRENT_STACK 2 /* dtr2: maps kernel's memory- & register-stacks */ + +/* Processor status register bits: */ +#define IA64_PSR_BE_BIT 1 +#define IA64_PSR_UP_BIT 2 +#define IA64_PSR_AC_BIT 3 +#define IA64_PSR_MFL_BIT 4 +#define IA64_PSR_MFH_BIT 5 +#define IA64_PSR_IC_BIT 13 +#define IA64_PSR_I_BIT 14 +#define IA64_PSR_PK_BIT 15 +#define IA64_PSR_DT_BIT 17 +#define IA64_PSR_DFL_BIT 18 +#define IA64_PSR_DFH_BIT 19 +#define IA64_PSR_SP_BIT 20 +#define IA64_PSR_PP_BIT 21 +#define IA64_PSR_DI_BIT 22 +#define IA64_PSR_SI_BIT 23 +#define IA64_PSR_DB_BIT 24 +#define IA64_PSR_LP_BIT 25 +#define IA64_PSR_TB_BIT 26 +#define IA64_PSR_RT_BIT 27 +/* The following are not affected by save_flags()/restore_flags(): */ +#define IA64_PSR_CPL0_BIT 32 +#define IA64_PSR_CPL1_BIT 33 +#define IA64_PSR_IS_BIT 34 +#define IA64_PSR_MC_BIT 35 +#define IA64_PSR_IT_BIT 36 +#define IA64_PSR_ID_BIT 37 +#define IA64_PSR_DA_BIT 38 +#define IA64_PSR_DD_BIT 39 +#define IA64_PSR_SS_BIT 40 +#define IA64_PSR_RI_BIT 41 +#define IA64_PSR_ED_BIT 43 
+#define IA64_PSR_BN_BIT 44 +#define IA64_PSR_IA_BIT 45 + +/* A mask of PSR bits that we generally don't want to inherit across a clone2() or an + execve(). Only list flags here that need to be cleared/set for BOTH clone2() and + execve(). */ +#define IA64_PSR_BITS_TO_CLEAR (IA64_PSR_MFL | IA64_PSR_MFH | IA64_PSR_DB | IA64_PSR_LP | \ + IA64_PSR_TB | IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | \ + IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA) +#define IA64_PSR_BITS_TO_SET (IA64_PSR_DFH | IA64_PSR_SP) + +#define IA64_PSR_BE (__IA64_UL(1) << IA64_PSR_BE_BIT) +#define IA64_PSR_UP (__IA64_UL(1) << IA64_PSR_UP_BIT) +#define IA64_PSR_AC (__IA64_UL(1) << IA64_PSR_AC_BIT) +#define IA64_PSR_MFL (__IA64_UL(1) << IA64_PSR_MFL_BIT) +#define IA64_PSR_MFH (__IA64_UL(1) << IA64_PSR_MFH_BIT) +#define IA64_PSR_IC (__IA64_UL(1) << IA64_PSR_IC_BIT) +#define IA64_PSR_I (__IA64_UL(1) << IA64_PSR_I_BIT) +#define IA64_PSR_PK (__IA64_UL(1) << IA64_PSR_PK_BIT) +#define IA64_PSR_DT (__IA64_UL(1) << IA64_PSR_DT_BIT) +#define IA64_PSR_DFL (__IA64_UL(1) << IA64_PSR_DFL_BIT) +#define IA64_PSR_DFH (__IA64_UL(1) << IA64_PSR_DFH_BIT) +#define IA64_PSR_SP (__IA64_UL(1) << IA64_PSR_SP_BIT) +#define IA64_PSR_PP (__IA64_UL(1) << IA64_PSR_PP_BIT) +#define IA64_PSR_DI (__IA64_UL(1) << IA64_PSR_DI_BIT) +#define IA64_PSR_SI (__IA64_UL(1) << IA64_PSR_SI_BIT) +#define IA64_PSR_DB (__IA64_UL(1) << IA64_PSR_DB_BIT) +#define IA64_PSR_LP (__IA64_UL(1) << IA64_PSR_LP_BIT) +#define IA64_PSR_TB (__IA64_UL(1) << IA64_PSR_TB_BIT) +#define IA64_PSR_RT (__IA64_UL(1) << IA64_PSR_RT_BIT) +/* The following are not affected by save_flags()/restore_flags(): */ +#define IA64_PSR_CPL (__IA64_UL(3) << IA64_PSR_CPL0_BIT) +#define IA64_PSR_IS (__IA64_UL(1) << IA64_PSR_IS_BIT) +#define IA64_PSR_MC (__IA64_UL(1) << IA64_PSR_MC_BIT) +#define IA64_PSR_IT (__IA64_UL(1) << IA64_PSR_IT_BIT) +#define IA64_PSR_ID (__IA64_UL(1) << IA64_PSR_ID_BIT) +#define IA64_PSR_DA (__IA64_UL(1) << IA64_PSR_DA_BIT) +#define IA64_PSR_DD (__IA64_UL(1) << IA64_PSR_DD_BIT) +#define IA64_PSR_SS (__IA64_UL(1) << IA64_PSR_SS_BIT) +#define IA64_PSR_RI (__IA64_UL(3) << IA64_PSR_RI_BIT) +#define IA64_PSR_ED (__IA64_UL(1) << IA64_PSR_ED_BIT) +#define IA64_PSR_BN (__IA64_UL(1) << IA64_PSR_BN_BIT) +#define IA64_PSR_IA (__IA64_UL(1) << IA64_PSR_IA_BIT) + +/* User mask bits: */ +#define IA64_PSR_UM (IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL | IA64_PSR_MFH) + +/* Default Control Register */ +#define IA64_DCR_PP_BIT 0 /* privileged performance monitor default */ +#define IA64_DCR_BE_BIT 1 /* big-endian default */ +#define IA64_DCR_LC_BIT 2 /* ia32 lock-check enable */ +#define IA64_DCR_DM_BIT 8 /* defer TLB miss faults */ +#define IA64_DCR_DP_BIT 9 /* defer page-not-present faults */ +#define IA64_DCR_DK_BIT 10 /* defer key miss faults */ +#define IA64_DCR_DX_BIT 11 /* defer key permission faults */ +#define IA64_DCR_DR_BIT 12 /* defer access right faults */ +#define IA64_DCR_DA_BIT 13 /* defer access bit faults */ +#define IA64_DCR_DD_BIT 14 /* defer debug faults */ + +#define IA64_DCR_PP (__IA64_UL(1) << IA64_DCR_PP_BIT) +#define IA64_DCR_BE (__IA64_UL(1) << IA64_DCR_BE_BIT) +#define IA64_DCR_LC (__IA64_UL(1) << IA64_DCR_LC_BIT) +#define IA64_DCR_DM (__IA64_UL(1) << IA64_DCR_DM_BIT) +#define IA64_DCR_DP (__IA64_UL(1) << IA64_DCR_DP_BIT) +#define IA64_DCR_DK (__IA64_UL(1) << IA64_DCR_DK_BIT) +#define IA64_DCR_DX (__IA64_UL(1) << IA64_DCR_DX_BIT) +#define IA64_DCR_DR (__IA64_UL(1) << IA64_DCR_DR_BIT) +#define IA64_DCR_DA (__IA64_UL(1) << IA64_DCR_DA_BIT) +#define IA64_DCR_DD 
(__IA64_UL(1) << IA64_DCR_DD_BIT)
+
+/* Interrupt Status Register */
+#define IA64_ISR_X_BIT		32	/* execute access */
+#define IA64_ISR_W_BIT		33	/* write access */
+#define IA64_ISR_R_BIT		34	/* read access */
+#define IA64_ISR_NA_BIT		35	/* non-access */
+#define IA64_ISR_SP_BIT		36	/* speculative load exception */
+#define IA64_ISR_RS_BIT		37	/* mandatory register-stack exception */
+#define IA64_ISR_IR_BIT		38	/* invalid register frame exception */
+#define IA64_ISR_CODE_MASK	0xf
+
+#define IA64_ISR_X	(__IA64_UL(1) << IA64_ISR_X_BIT)
+#define IA64_ISR_W	(__IA64_UL(1) << IA64_ISR_W_BIT)
+#define IA64_ISR_R	(__IA64_UL(1) << IA64_ISR_R_BIT)
+#define IA64_ISR_NA	(__IA64_UL(1) << IA64_ISR_NA_BIT)
+#define IA64_ISR_SP	(__IA64_UL(1) << IA64_ISR_SP_BIT)
+#define IA64_ISR_RS	(__IA64_UL(1) << IA64_ISR_RS_BIT)
+#define IA64_ISR_IR	(__IA64_UL(1) << IA64_ISR_IR_BIT)
+
+/* ISR code field for non-access instructions */
+#define IA64_ISR_CODE_TPA	0
+#define IA64_ISR_CODE_FC	1
+#define IA64_ISR_CODE_PROBE	2
+#define IA64_ISR_CODE_TAK	3
+#define IA64_ISR_CODE_LFETCH	4
+#define IA64_ISR_CODE_PROBEF	5
+
+#endif /* _ASM_IA64_KREGS_H */
diff --git a/include/asm-ia64/linkage.h b/include/asm-ia64/linkage.h
new file mode 100644
index 000000000000..14cd72cd8007
--- /dev/null
+++ b/include/asm-ia64/linkage.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+#define asmlinkage CPP_ASMLINKAGE __attribute__((syscall_linkage))
+
+#endif
diff --git a/include/asm-ia64/local.h b/include/asm-ia64/local.h
new file mode 100644
index 000000000000..1dbd584ad851
--- /dev/null
+++ b/include/asm-ia64/local.h
@@ -0,0 +1,50 @@
+#ifndef _ASM_IA64_LOCAL_H
+#define _ASM_IA64_LOCAL_H
+
+/*
+ * Copyright (C) 2003 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#include <linux/percpu.h>
+
+typedef struct {
+	atomic64_t val;
+} local_t;
+
+#define LOCAL_INIT(i)	((local_t) { { (i) } })
+#define local_read(l)	atomic64_read(&(l)->val)
+#define local_set(l, i)	atomic64_set(&(l)->val, i)
+#define local_inc(l)	atomic64_inc(&(l)->val)
+#define local_dec(l)	atomic64_dec(&(l)->val)
+#define local_add(i, l)	atomic64_add((i), &(l)->val)
+#define local_sub(i, l)	atomic64_sub((i), &(l)->val)
+
+/* Non-atomic variants, i.e., preemption disabled and won't be touched in interrupt, etc. */
+
+#define __local_inc(l)		(++(l)->val.counter)
+#define __local_dec(l)		(--(l)->val.counter)
+#define __local_add(i,l)	((l)->val.counter += (i))
+#define __local_sub(i,l)	((l)->val.counter -= (i))
+
+/*
+ * Use these for per-CPU local_t variables.  Note they take a variable (e.g. mystruct.foo),
+ * not an address.
+ */
+#define cpu_local_read(v)	local_read(&__ia64_per_cpu_var(v))
+#define cpu_local_set(v, i)	local_set(&__ia64_per_cpu_var(v), (i))
+#define cpu_local_inc(v)	local_inc(&__ia64_per_cpu_var(v))
+#define cpu_local_dec(v)	local_dec(&__ia64_per_cpu_var(v))
+#define cpu_local_add(i, v)	local_add((i), &__ia64_per_cpu_var(v))
+#define cpu_local_sub(i, v)	local_sub((i), &__ia64_per_cpu_var(v))
+
+/*
+ * Non-atomic increments, i.e., preemption disabled and won't be touched in interrupt,
+ * etc.
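+ *
+ * (Editor's sketch, not in the original header: intended usage, assuming a
+ * counter declared with DEFINE_PER_CPU from <linux/percpu.h>:
+ *
+ *	static DEFINE_PER_CPU(local_t, nr_events);
+ *
+ *	cpu_local_inc(nr_events);	// atomic; safe against interrupts
+ *	__cpu_local_add(3, nr_events);	// non-atomic; caller excludes irqs
+ *
+ * 'nr_events' is a hypothetical name used only for illustration.)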
+ */ +#define __cpu_local_inc(v) __local_inc(&__ia64_per_cpu_var(v)) +#define __cpu_local_dec(v) __local_dec(&__ia64_per_cpu_var(v)) +#define __cpu_local_add(i, v) __local_add((i), &__ia64_per_cpu_var(v)) +#define __cpu_local_sub(i, v) __local_sub((i), &__ia64_per_cpu_var(v)) + +#endif /* _ASM_IA64_LOCAL_H */ diff --git a/include/asm-ia64/machvec.h b/include/asm-ia64/machvec.h new file mode 100644 index 000000000000..79e89a7db566 --- /dev/null +++ b/include/asm-ia64/machvec.h @@ -0,0 +1,390 @@ +/* + * Machine vector for IA-64. + * + * Copyright (C) 1999 Silicon Graphics, Inc. + * Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com> + * Copyright (C) Vijay Chander <vijay@engr.sgi.com> + * Copyright (C) 1999-2001, 2003-2004 Hewlett-Packard Co. + * David Mosberger-Tang <davidm@hpl.hp.com> + */ +#ifndef _ASM_IA64_MACHVEC_H +#define _ASM_IA64_MACHVEC_H + +#include <linux/config.h> +#include <linux/types.h> + +/* forward declarations: */ +struct device; +struct pt_regs; +struct scatterlist; +struct page; +struct mm_struct; +struct pci_bus; + +typedef void ia64_mv_setup_t (char **); +typedef void ia64_mv_cpu_init_t (void); +typedef void ia64_mv_irq_init_t (void); +typedef void ia64_mv_send_ipi_t (int, int, int, int); +typedef void ia64_mv_timer_interrupt_t (int, void *, struct pt_regs *); +typedef void ia64_mv_global_tlb_purge_t (unsigned long, unsigned long, unsigned long); +typedef void ia64_mv_tlb_migrate_finish_t (struct mm_struct *); +typedef unsigned int ia64_mv_local_vector_to_irq (u8); +typedef char *ia64_mv_pci_get_legacy_mem_t (struct pci_bus *); +typedef int ia64_mv_pci_legacy_read_t (struct pci_bus *, u16 port, u32 *val, + u8 size); +typedef int ia64_mv_pci_legacy_write_t (struct pci_bus *, u16 port, u32 val, + u8 size); + +/* DMA-mapping interface: */ +typedef void ia64_mv_dma_init (void); +typedef void *ia64_mv_dma_alloc_coherent (struct device *, size_t, dma_addr_t *, int); +typedef void ia64_mv_dma_free_coherent (struct device *, size_t, void *, dma_addr_t); +typedef dma_addr_t ia64_mv_dma_map_single (struct device *, void *, size_t, int); +typedef void ia64_mv_dma_unmap_single (struct device *, dma_addr_t, size_t, int); +typedef int ia64_mv_dma_map_sg (struct device *, struct scatterlist *, int, int); +typedef void ia64_mv_dma_unmap_sg (struct device *, struct scatterlist *, int, int); +typedef void ia64_mv_dma_sync_single_for_cpu (struct device *, dma_addr_t, size_t, int); +typedef void ia64_mv_dma_sync_sg_for_cpu (struct device *, struct scatterlist *, int, int); +typedef void ia64_mv_dma_sync_single_for_device (struct device *, dma_addr_t, size_t, int); +typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist *, int, int); +typedef int ia64_mv_dma_mapping_error (dma_addr_t dma_addr); +typedef int ia64_mv_dma_supported (struct device *, u64); + +/* + * WARNING: The legacy I/O space is _architected_. Platforms are + * expected to follow this architected model (see Section 10.7 in the + * IA-64 Architecture Software Developer's Manual). Unfortunately, + * some broken machines do not follow that model, which is why we have + * to make the inX/outX operations part of the machine vector. + * Platform designers should follow the architected model whenever + * possible. 
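+ *
+ * (Editor's note: the __ia64_in*()/__ia64_out*() routines in <asm/io.h>
+ * implement the architected model; the port number is mapped to an uncached
+ * I/O address by __ia64_mk_io_addr() and the access is ordered with mf.a.
+ * Only platforms that deviate from that model need to override these
+ * machine-vector entries.)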
+ */ +typedef unsigned int ia64_mv_inb_t (unsigned long); +typedef unsigned int ia64_mv_inw_t (unsigned long); +typedef unsigned int ia64_mv_inl_t (unsigned long); +typedef void ia64_mv_outb_t (unsigned char, unsigned long); +typedef void ia64_mv_outw_t (unsigned short, unsigned long); +typedef void ia64_mv_outl_t (unsigned int, unsigned long); +typedef void ia64_mv_mmiowb_t (void); +typedef unsigned char ia64_mv_readb_t (const volatile void __iomem *); +typedef unsigned short ia64_mv_readw_t (const volatile void __iomem *); +typedef unsigned int ia64_mv_readl_t (const volatile void __iomem *); +typedef unsigned long ia64_mv_readq_t (const volatile void __iomem *); +typedef unsigned char ia64_mv_readb_relaxed_t (const volatile void __iomem *); +typedef unsigned short ia64_mv_readw_relaxed_t (const volatile void __iomem *); +typedef unsigned int ia64_mv_readl_relaxed_t (const volatile void __iomem *); +typedef unsigned long ia64_mv_readq_relaxed_t (const volatile void __iomem *); + +static inline void +machvec_noop (void) +{ +} + +static inline void +machvec_noop_mm (struct mm_struct *mm) +{ +} + +extern void machvec_setup (char **); +extern void machvec_timer_interrupt (int, void *, struct pt_regs *); +extern void machvec_dma_sync_single (struct device *, dma_addr_t, size_t, int); +extern void machvec_dma_sync_sg (struct device *, struct scatterlist *, int, int); +extern void machvec_tlb_migrate_finish (struct mm_struct *); + +# if defined (CONFIG_IA64_HP_SIM) +# include <asm/machvec_hpsim.h> +# elif defined (CONFIG_IA64_DIG) +# include <asm/machvec_dig.h> +# elif defined (CONFIG_IA64_HP_ZX1) +# include <asm/machvec_hpzx1.h> +# elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB) +# include <asm/machvec_hpzx1_swiotlb.h> +# elif defined (CONFIG_IA64_SGI_SN2) +# include <asm/machvec_sn2.h> +# elif defined (CONFIG_IA64_GENERIC) + +# ifdef MACHVEC_PLATFORM_HEADER +# include MACHVEC_PLATFORM_HEADER +# else +# define platform_name ia64_mv.name +# define platform_setup ia64_mv.setup +# define platform_cpu_init ia64_mv.cpu_init +# define platform_irq_init ia64_mv.irq_init +# define platform_send_ipi ia64_mv.send_ipi +# define platform_timer_interrupt ia64_mv.timer_interrupt +# define platform_global_tlb_purge ia64_mv.global_tlb_purge +# define platform_tlb_migrate_finish ia64_mv.tlb_migrate_finish +# define platform_dma_init ia64_mv.dma_init +# define platform_dma_alloc_coherent ia64_mv.dma_alloc_coherent +# define platform_dma_free_coherent ia64_mv.dma_free_coherent +# define platform_dma_map_single ia64_mv.dma_map_single +# define platform_dma_unmap_single ia64_mv.dma_unmap_single +# define platform_dma_map_sg ia64_mv.dma_map_sg +# define platform_dma_unmap_sg ia64_mv.dma_unmap_sg +# define platform_dma_sync_single_for_cpu ia64_mv.dma_sync_single_for_cpu +# define platform_dma_sync_sg_for_cpu ia64_mv.dma_sync_sg_for_cpu +# define platform_dma_sync_single_for_device ia64_mv.dma_sync_single_for_device +# define platform_dma_sync_sg_for_device ia64_mv.dma_sync_sg_for_device +# define platform_dma_mapping_error ia64_mv.dma_mapping_error +# define platform_dma_supported ia64_mv.dma_supported +# define platform_local_vector_to_irq ia64_mv.local_vector_to_irq +# define platform_pci_get_legacy_mem ia64_mv.pci_get_legacy_mem +# define platform_pci_legacy_read ia64_mv.pci_legacy_read +# define platform_pci_legacy_write ia64_mv.pci_legacy_write +# define platform_inb ia64_mv.inb +# define platform_inw ia64_mv.inw +# define platform_inl ia64_mv.inl +# define platform_outb ia64_mv.outb +# define platform_outw 
ia64_mv.outw
+# define platform_outl			ia64_mv.outl
+# define platform_mmiowb		ia64_mv.mmiowb
+# define platform_readb			ia64_mv.readb
+# define platform_readw			ia64_mv.readw
+# define platform_readl			ia64_mv.readl
+# define platform_readq			ia64_mv.readq
+# define platform_readb_relaxed		ia64_mv.readb_relaxed
+# define platform_readw_relaxed		ia64_mv.readw_relaxed
+# define platform_readl_relaxed		ia64_mv.readl_relaxed
+# define platform_readq_relaxed		ia64_mv.readq_relaxed
+# endif
+
+/* __attribute__((__aligned__(16))) is required to make the size of the
+ * structure a multiple of 16 bytes.
+ * This fills up the holes created because of section 3.3.1 in the
+ * Software Conventions guide.
+ */
+struct ia64_machine_vector {
+	const char *name;
+	ia64_mv_setup_t *setup;
+	ia64_mv_cpu_init_t *cpu_init;
+	ia64_mv_irq_init_t *irq_init;
+	ia64_mv_send_ipi_t *send_ipi;
+	ia64_mv_timer_interrupt_t *timer_interrupt;
+	ia64_mv_global_tlb_purge_t *global_tlb_purge;
+	ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish;
+	ia64_mv_dma_init *dma_init;
+	ia64_mv_dma_alloc_coherent *dma_alloc_coherent;
+	ia64_mv_dma_free_coherent *dma_free_coherent;
+	ia64_mv_dma_map_single *dma_map_single;
+	ia64_mv_dma_unmap_single *dma_unmap_single;
+	ia64_mv_dma_map_sg *dma_map_sg;
+	ia64_mv_dma_unmap_sg *dma_unmap_sg;
+	ia64_mv_dma_sync_single_for_cpu *dma_sync_single_for_cpu;
+	ia64_mv_dma_sync_sg_for_cpu *dma_sync_sg_for_cpu;
+	ia64_mv_dma_sync_single_for_device *dma_sync_single_for_device;
+	ia64_mv_dma_sync_sg_for_device *dma_sync_sg_for_device;
+	ia64_mv_dma_mapping_error *dma_mapping_error;
+	ia64_mv_dma_supported *dma_supported;
+	ia64_mv_local_vector_to_irq *local_vector_to_irq;
+	ia64_mv_pci_get_legacy_mem_t *pci_get_legacy_mem;
+	ia64_mv_pci_legacy_read_t *pci_legacy_read;
+	ia64_mv_pci_legacy_write_t *pci_legacy_write;
+	ia64_mv_inb_t *inb;
+	ia64_mv_inw_t *inw;
+	ia64_mv_inl_t *inl;
+	ia64_mv_outb_t *outb;
+	ia64_mv_outw_t *outw;
+	ia64_mv_outl_t *outl;
+	ia64_mv_mmiowb_t *mmiowb;
+	ia64_mv_readb_t *readb;
+	ia64_mv_readw_t *readw;
+	ia64_mv_readl_t *readl;
+	ia64_mv_readq_t *readq;
+	ia64_mv_readb_relaxed_t *readb_relaxed;
+	ia64_mv_readw_relaxed_t *readw_relaxed;
+	ia64_mv_readl_relaxed_t *readl_relaxed;
+	ia64_mv_readq_relaxed_t *readq_relaxed;
+} __attribute__((__aligned__(16))); /* align attrib?
see above comment */ + +#define MACHVEC_INIT(name) \ +{ \ + #name, \ + platform_setup, \ + platform_cpu_init, \ + platform_irq_init, \ + platform_send_ipi, \ + platform_timer_interrupt, \ + platform_global_tlb_purge, \ + platform_tlb_migrate_finish, \ + platform_dma_init, \ + platform_dma_alloc_coherent, \ + platform_dma_free_coherent, \ + platform_dma_map_single, \ + platform_dma_unmap_single, \ + platform_dma_map_sg, \ + platform_dma_unmap_sg, \ + platform_dma_sync_single_for_cpu, \ + platform_dma_sync_sg_for_cpu, \ + platform_dma_sync_single_for_device, \ + platform_dma_sync_sg_for_device, \ + platform_dma_mapping_error, \ + platform_dma_supported, \ + platform_local_vector_to_irq, \ + platform_pci_get_legacy_mem, \ + platform_pci_legacy_read, \ + platform_pci_legacy_write, \ + platform_inb, \ + platform_inw, \ + platform_inl, \ + platform_outb, \ + platform_outw, \ + platform_outl, \ + platform_mmiowb, \ + platform_readb, \ + platform_readw, \ + platform_readl, \ + platform_readq, \ + platform_readb_relaxed, \ + platform_readw_relaxed, \ + platform_readl_relaxed, \ + platform_readq_relaxed, \ +} + +extern struct ia64_machine_vector ia64_mv; +extern void machvec_init (const char *name); + +# else +# error Unknown configuration. Update asm-ia64/machvec.h. +# endif /* CONFIG_IA64_GENERIC */ + +/* + * Declare default routines which aren't declared anywhere else: + */ +extern ia64_mv_dma_init swiotlb_init; +extern ia64_mv_dma_alloc_coherent swiotlb_alloc_coherent; +extern ia64_mv_dma_free_coherent swiotlb_free_coherent; +extern ia64_mv_dma_map_single swiotlb_map_single; +extern ia64_mv_dma_unmap_single swiotlb_unmap_single; +extern ia64_mv_dma_map_sg swiotlb_map_sg; +extern ia64_mv_dma_unmap_sg swiotlb_unmap_sg; +extern ia64_mv_dma_sync_single_for_cpu swiotlb_sync_single_for_cpu; +extern ia64_mv_dma_sync_sg_for_cpu swiotlb_sync_sg_for_cpu; +extern ia64_mv_dma_sync_single_for_device swiotlb_sync_single_for_device; +extern ia64_mv_dma_sync_sg_for_device swiotlb_sync_sg_for_device; +extern ia64_mv_dma_mapping_error swiotlb_dma_mapping_error; +extern ia64_mv_dma_supported swiotlb_dma_supported; + +/* + * Define default versions so we can extend machvec for new platforms without having + * to update the machvec files for all existing platforms. 
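+ *
+ * (Editor's note: a platform header therefore needs to #define only the
+ * operations it overrides; anything it leaves undefined falls back to the
+ * swiotlb or architected defaults selected below.)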
+ */ +#ifndef platform_setup +# define platform_setup machvec_setup +#endif +#ifndef platform_cpu_init +# define platform_cpu_init machvec_noop +#endif +#ifndef platform_irq_init +# define platform_irq_init machvec_noop +#endif + +#ifndef platform_send_ipi +# define platform_send_ipi ia64_send_ipi /* default to architected version */ +#endif +#ifndef platform_timer_interrupt +# define platform_timer_interrupt machvec_timer_interrupt +#endif +#ifndef platform_global_tlb_purge +# define platform_global_tlb_purge ia64_global_tlb_purge /* default to architected version */ +#endif +#ifndef platform_tlb_migrate_finish +# define platform_tlb_migrate_finish machvec_noop_mm +#endif +#ifndef platform_dma_init +# define platform_dma_init swiotlb_init +#endif +#ifndef platform_dma_alloc_coherent +# define platform_dma_alloc_coherent swiotlb_alloc_coherent +#endif +#ifndef platform_dma_free_coherent +# define platform_dma_free_coherent swiotlb_free_coherent +#endif +#ifndef platform_dma_map_single +# define platform_dma_map_single swiotlb_map_single +#endif +#ifndef platform_dma_unmap_single +# define platform_dma_unmap_single swiotlb_unmap_single +#endif +#ifndef platform_dma_map_sg +# define platform_dma_map_sg swiotlb_map_sg +#endif +#ifndef platform_dma_unmap_sg +# define platform_dma_unmap_sg swiotlb_unmap_sg +#endif +#ifndef platform_dma_sync_single_for_cpu +# define platform_dma_sync_single_for_cpu swiotlb_sync_single_for_cpu +#endif +#ifndef platform_dma_sync_sg_for_cpu +# define platform_dma_sync_sg_for_cpu swiotlb_sync_sg_for_cpu +#endif +#ifndef platform_dma_sync_single_for_device +# define platform_dma_sync_single_for_device swiotlb_sync_single_for_device +#endif +#ifndef platform_dma_sync_sg_for_device +# define platform_dma_sync_sg_for_device swiotlb_sync_sg_for_device +#endif +#ifndef platform_dma_mapping_error +# define platform_dma_mapping_error swiotlb_dma_mapping_error +#endif +#ifndef platform_dma_supported +# define platform_dma_supported swiotlb_dma_supported +#endif +#ifndef platform_local_vector_to_irq +# define platform_local_vector_to_irq __ia64_local_vector_to_irq +#endif +#ifndef platform_pci_get_legacy_mem +# define platform_pci_get_legacy_mem ia64_pci_get_legacy_mem +#endif +#ifndef platform_pci_legacy_read +# define platform_pci_legacy_read ia64_pci_legacy_read +#endif +#ifndef platform_pci_legacy_write +# define platform_pci_legacy_write ia64_pci_legacy_write +#endif +#ifndef platform_inb +# define platform_inb __ia64_inb +#endif +#ifndef platform_inw +# define platform_inw __ia64_inw +#endif +#ifndef platform_inl +# define platform_inl __ia64_inl +#endif +#ifndef platform_outb +# define platform_outb __ia64_outb +#endif +#ifndef platform_outw +# define platform_outw __ia64_outw +#endif +#ifndef platform_outl +# define platform_outl __ia64_outl +#endif +#ifndef platform_mmiowb +# define platform_mmiowb __ia64_mmiowb +#endif +#ifndef platform_readb +# define platform_readb __ia64_readb +#endif +#ifndef platform_readw +# define platform_readw __ia64_readw +#endif +#ifndef platform_readl +# define platform_readl __ia64_readl +#endif +#ifndef platform_readq +# define platform_readq __ia64_readq +#endif +#ifndef platform_readb_relaxed +# define platform_readb_relaxed __ia64_readb_relaxed +#endif +#ifndef platform_readw_relaxed +# define platform_readw_relaxed __ia64_readw_relaxed +#endif +#ifndef platform_readl_relaxed +# define platform_readl_relaxed __ia64_readl_relaxed +#endif +#ifndef platform_readq_relaxed +# define platform_readq_relaxed __ia64_readq_relaxed +#endif + 
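+/*
+ * Editor's sketch, not part of the original header: on a CONFIG_IA64_GENERIC
+ * kernel a driver-level port read such as "unsigned int v = inb(0x64)"
+ * (0x64 being a hypothetical example port) expands via <asm/io.h> as
+ *
+ *	inb(0x64) -> __inb(0x64) -> platform_inb(0x64) -> (*ia64_mv.inb)(0x64)
+ *
+ * i.e., one indirect call through the machine vector installed at boot by
+ * machvec_init().  A platform-specific kernel instead binds platform_inb at
+ * compile time (to __ia64_inb, or to an override such as __sn_inb), so the
+ * call is direct and can be inlined.
+ */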
+#endif /* _ASM_IA64_MACHVEC_H */ diff --git a/include/asm-ia64/machvec_dig.h b/include/asm-ia64/machvec_dig.h new file mode 100644 index 000000000000..4dc8522c974f --- /dev/null +++ b/include/asm-ia64/machvec_dig.h @@ -0,0 +1,18 @@ +#ifndef _ASM_IA64_MACHVEC_DIG_h +#define _ASM_IA64_MACHVEC_DIG_h + +extern ia64_mv_setup_t dig_setup; +extern ia64_mv_irq_init_t dig_irq_init; + +/* + * This stuff has dual use! + * + * For a generic kernel, the macros are used to initialize the + * platform's machvec structure. When compiling a non-generic kernel, + * the macros are used directly. + */ +#define platform_name "dig" +#define platform_setup dig_setup +#define platform_irq_init dig_irq_init + +#endif /* _ASM_IA64_MACHVEC_DIG_h */ diff --git a/include/asm-ia64/machvec_hpsim.h b/include/asm-ia64/machvec_hpsim.h new file mode 100644 index 000000000000..cf72fc87fdfe --- /dev/null +++ b/include/asm-ia64/machvec_hpsim.h @@ -0,0 +1,18 @@ +#ifndef _ASM_IA64_MACHVEC_HPSIM_h +#define _ASM_IA64_MACHVEC_HPSIM_h + +extern ia64_mv_setup_t hpsim_setup; +extern ia64_mv_irq_init_t hpsim_irq_init; + +/* + * This stuff has dual use! + * + * For a generic kernel, the macros are used to initialize the + * platform's machvec structure. When compiling a non-generic kernel, + * the macros are used directly. + */ +#define platform_name "hpsim" +#define platform_setup hpsim_setup +#define platform_irq_init hpsim_irq_init + +#endif /* _ASM_IA64_MACHVEC_HPSIM_h */ diff --git a/include/asm-ia64/machvec_hpzx1.h b/include/asm-ia64/machvec_hpzx1.h new file mode 100644 index 000000000000..daafe504c5f4 --- /dev/null +++ b/include/asm-ia64/machvec_hpzx1.h @@ -0,0 +1,38 @@ +#ifndef _ASM_IA64_MACHVEC_HPZX1_h +#define _ASM_IA64_MACHVEC_HPZX1_h + +extern ia64_mv_setup_t dig_setup; +extern ia64_mv_setup_t sba_setup; +extern ia64_mv_dma_alloc_coherent sba_alloc_coherent; +extern ia64_mv_dma_free_coherent sba_free_coherent; +extern ia64_mv_dma_map_single sba_map_single; +extern ia64_mv_dma_unmap_single sba_unmap_single; +extern ia64_mv_dma_map_sg sba_map_sg; +extern ia64_mv_dma_unmap_sg sba_unmap_sg; +extern ia64_mv_dma_supported sba_dma_supported; +extern ia64_mv_dma_mapping_error sba_dma_mapping_error; + +/* + * This stuff has dual use! + * + * For a generic kernel, the macros are used to initialize the + * platform's machvec structure. When compiling a non-generic kernel, + * the macros are used directly. 
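+ *
+ * (Editor's note: under CONFIG_IA64_GENERIC the kernel builds one machine
+ * vector per platform by setting MACHVEC_PLATFORM_HEADER to this file, so
+ * the #defines below feed MACHVEC_INIT() and emit an "hpzx1" entry in the
+ * .machvec section; see machvec_init.h.  A non-generic kernel includes this
+ * header from machvec.h and uses the #defines directly.)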
+ */ +#define platform_name "hpzx1" +#define platform_setup sba_setup +#define platform_dma_init machvec_noop +#define platform_dma_alloc_coherent sba_alloc_coherent +#define platform_dma_free_coherent sba_free_coherent +#define platform_dma_map_single sba_map_single +#define platform_dma_unmap_single sba_unmap_single +#define platform_dma_map_sg sba_map_sg +#define platform_dma_unmap_sg sba_unmap_sg +#define platform_dma_sync_single_for_cpu machvec_dma_sync_single +#define platform_dma_sync_sg_for_cpu machvec_dma_sync_sg +#define platform_dma_sync_single_for_device machvec_dma_sync_single +#define platform_dma_sync_sg_for_device machvec_dma_sync_sg +#define platform_dma_supported sba_dma_supported +#define platform_dma_mapping_error sba_dma_mapping_error + +#endif /* _ASM_IA64_MACHVEC_HPZX1_h */ diff --git a/include/asm-ia64/machvec_hpzx1_swiotlb.h b/include/asm-ia64/machvec_hpzx1_swiotlb.h new file mode 100644 index 000000000000..9924b1b00a6c --- /dev/null +++ b/include/asm-ia64/machvec_hpzx1_swiotlb.h @@ -0,0 +1,43 @@ +#ifndef _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h +#define _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h + +extern ia64_mv_setup_t dig_setup; +extern ia64_mv_dma_init hwsw_init; +extern ia64_mv_dma_alloc_coherent hwsw_alloc_coherent; +extern ia64_mv_dma_free_coherent hwsw_free_coherent; +extern ia64_mv_dma_map_single hwsw_map_single; +extern ia64_mv_dma_unmap_single hwsw_unmap_single; +extern ia64_mv_dma_map_sg hwsw_map_sg; +extern ia64_mv_dma_unmap_sg hwsw_unmap_sg; +extern ia64_mv_dma_supported hwsw_dma_supported; +extern ia64_mv_dma_mapping_error hwsw_dma_mapping_error; +extern ia64_mv_dma_sync_single_for_cpu hwsw_sync_single_for_cpu; +extern ia64_mv_dma_sync_sg_for_cpu hwsw_sync_sg_for_cpu; +extern ia64_mv_dma_sync_single_for_device hwsw_sync_single_for_device; +extern ia64_mv_dma_sync_sg_for_device hwsw_sync_sg_for_device; + +/* + * This stuff has dual use! + * + * For a generic kernel, the macros are used to initialize the + * platform's machvec structure. When compiling a non-generic kernel, + * the macros are used directly. 
+ */ +#define platform_name "hpzx1_swiotlb" + +#define platform_setup dig_setup +#define platform_dma_init hwsw_init +#define platform_dma_alloc_coherent hwsw_alloc_coherent +#define platform_dma_free_coherent hwsw_free_coherent +#define platform_dma_map_single hwsw_map_single +#define platform_dma_unmap_single hwsw_unmap_single +#define platform_dma_map_sg hwsw_map_sg +#define platform_dma_unmap_sg hwsw_unmap_sg +#define platform_dma_supported hwsw_dma_supported +#define platform_dma_mapping_error hwsw_dma_mapping_error +#define platform_dma_sync_single_for_cpu hwsw_sync_single_for_cpu +#define platform_dma_sync_sg_for_cpu hwsw_sync_sg_for_cpu +#define platform_dma_sync_single_for_device hwsw_sync_single_for_device +#define platform_dma_sync_sg_for_device hwsw_sync_sg_for_device + +#endif /* _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h */ diff --git a/include/asm-ia64/machvec_init.h b/include/asm-ia64/machvec_init.h new file mode 100644 index 000000000000..2d36f6840f0b --- /dev/null +++ b/include/asm-ia64/machvec_init.h @@ -0,0 +1,32 @@ +#include <asm/machvec.h> + +extern ia64_mv_send_ipi_t ia64_send_ipi; +extern ia64_mv_global_tlb_purge_t ia64_global_tlb_purge; +extern ia64_mv_local_vector_to_irq __ia64_local_vector_to_irq; +extern ia64_mv_pci_get_legacy_mem_t ia64_pci_get_legacy_mem; +extern ia64_mv_pci_legacy_read_t ia64_pci_legacy_read; +extern ia64_mv_pci_legacy_write_t ia64_pci_legacy_write; + +extern ia64_mv_inb_t __ia64_inb; +extern ia64_mv_inw_t __ia64_inw; +extern ia64_mv_inl_t __ia64_inl; +extern ia64_mv_outb_t __ia64_outb; +extern ia64_mv_outw_t __ia64_outw; +extern ia64_mv_outl_t __ia64_outl; +extern ia64_mv_mmiowb_t __ia64_mmiowb; +extern ia64_mv_readb_t __ia64_readb; +extern ia64_mv_readw_t __ia64_readw; +extern ia64_mv_readl_t __ia64_readl; +extern ia64_mv_readq_t __ia64_readq; +extern ia64_mv_readb_t __ia64_readb_relaxed; +extern ia64_mv_readw_t __ia64_readw_relaxed; +extern ia64_mv_readl_t __ia64_readl_relaxed; +extern ia64_mv_readq_t __ia64_readq_relaxed; + +#define MACHVEC_HELPER(name) \ + struct ia64_machine_vector machvec_##name __attribute__ ((unused, __section__ (".machvec"))) \ + = MACHVEC_INIT(name); + +#define MACHVEC_DEFINE(name) MACHVEC_HELPER(name) + +MACHVEC_DEFINE(MACHVEC_PLATFORM_NAME) diff --git a/include/asm-ia64/machvec_sn2.h b/include/asm-ia64/machvec_sn2.h new file mode 100644 index 000000000000..e1b6cd63f49e --- /dev/null +++ b/include/asm-ia64/machvec_sn2.h @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2002-2003 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/NoticeExplan + */ + +#ifndef _ASM_IA64_MACHVEC_SN2_H +#define _ASM_IA64_MACHVEC_SN2_H + +extern ia64_mv_setup_t sn_setup; +extern ia64_mv_cpu_init_t sn_cpu_init; +extern ia64_mv_irq_init_t sn_irq_init; +extern ia64_mv_send_ipi_t sn2_send_IPI; +extern ia64_mv_timer_interrupt_t sn_timer_interrupt; +extern ia64_mv_global_tlb_purge_t sn2_global_tlb_purge; +extern ia64_mv_tlb_migrate_finish_t sn_tlb_migrate_finish; +extern ia64_mv_local_vector_to_irq sn_local_vector_to_irq; +extern ia64_mv_pci_get_legacy_mem_t sn_pci_get_legacy_mem; +extern ia64_mv_pci_legacy_read_t sn_pci_legacy_read; +extern ia64_mv_pci_legacy_write_t sn_pci_legacy_write; +extern ia64_mv_inb_t __sn_inb; +extern ia64_mv_inw_t __sn_inw; +extern ia64_mv_inl_t __sn_inl; +extern ia64_mv_outb_t __sn_outb; +extern ia64_mv_outw_t __sn_outw; +extern ia64_mv_outl_t __sn_outl; +extern ia64_mv_mmiowb_t __sn_mmiowb; +extern ia64_mv_readb_t __sn_readb; +extern ia64_mv_readw_t __sn_readw; +extern ia64_mv_readl_t __sn_readl; +extern ia64_mv_readq_t __sn_readq; +extern ia64_mv_readb_t __sn_readb_relaxed; +extern ia64_mv_readw_t __sn_readw_relaxed; +extern ia64_mv_readl_t __sn_readl_relaxed; +extern ia64_mv_readq_t __sn_readq_relaxed; +extern ia64_mv_dma_alloc_coherent sn_dma_alloc_coherent; +extern ia64_mv_dma_free_coherent sn_dma_free_coherent; +extern ia64_mv_dma_map_single sn_dma_map_single; +extern ia64_mv_dma_unmap_single sn_dma_unmap_single; +extern ia64_mv_dma_map_sg sn_dma_map_sg; +extern ia64_mv_dma_unmap_sg sn_dma_unmap_sg; +extern ia64_mv_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu; +extern ia64_mv_dma_sync_sg_for_cpu sn_dma_sync_sg_for_cpu; +extern ia64_mv_dma_sync_single_for_device sn_dma_sync_single_for_device; +extern ia64_mv_dma_sync_sg_for_device sn_dma_sync_sg_for_device; +extern ia64_mv_dma_mapping_error sn_dma_mapping_error; +extern ia64_mv_dma_supported sn_dma_supported; + +/* + * This stuff has dual use! + * + * For a generic kernel, the macros are used to initialize the + * platform's machvec structure. When compiling a non-generic kernel, + * the macros are used directly. 
+ */ +#define platform_name "sn2" +#define platform_setup sn_setup +#define platform_cpu_init sn_cpu_init +#define platform_irq_init sn_irq_init +#define platform_send_ipi sn2_send_IPI +#define platform_timer_interrupt sn_timer_interrupt +#define platform_global_tlb_purge sn2_global_tlb_purge +#define platform_tlb_migrate_finish sn_tlb_migrate_finish +#define platform_pci_fixup sn_pci_fixup +#define platform_inb __sn_inb +#define platform_inw __sn_inw +#define platform_inl __sn_inl +#define platform_outb __sn_outb +#define platform_outw __sn_outw +#define platform_outl __sn_outl +#define platform_mmiowb __sn_mmiowb +#define platform_readb __sn_readb +#define platform_readw __sn_readw +#define platform_readl __sn_readl +#define platform_readq __sn_readq +#define platform_readb_relaxed __sn_readb_relaxed +#define platform_readw_relaxed __sn_readw_relaxed +#define platform_readl_relaxed __sn_readl_relaxed +#define platform_readq_relaxed __sn_readq_relaxed +#define platform_local_vector_to_irq sn_local_vector_to_irq +#define platform_pci_get_legacy_mem sn_pci_get_legacy_mem +#define platform_pci_legacy_read sn_pci_legacy_read +#define platform_pci_legacy_write sn_pci_legacy_write +#define platform_dma_init machvec_noop +#define platform_dma_alloc_coherent sn_dma_alloc_coherent +#define platform_dma_free_coherent sn_dma_free_coherent +#define platform_dma_map_single sn_dma_map_single +#define platform_dma_unmap_single sn_dma_unmap_single +#define platform_dma_map_sg sn_dma_map_sg +#define platform_dma_unmap_sg sn_dma_unmap_sg +#define platform_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu +#define platform_dma_sync_sg_for_cpu sn_dma_sync_sg_for_cpu +#define platform_dma_sync_single_for_device sn_dma_sync_single_for_device +#define platform_dma_sync_sg_for_device sn_dma_sync_sg_for_device +#define platform_dma_mapping_error sn_dma_mapping_error +#define platform_dma_supported sn_dma_supported + +#include <asm/sn/io.h> + +#endif /* _ASM_IA64_MACHVEC_SN2_H */ diff --git a/include/asm-ia64/mc146818rtc.h b/include/asm-ia64/mc146818rtc.h new file mode 100644 index 000000000000..407787a237ba --- /dev/null +++ b/include/asm-ia64/mc146818rtc.h @@ -0,0 +1,10 @@ +#ifndef _ASM_IA64_MC146818RTC_H +#define _ASM_IA64_MC146818RTC_H + +/* + * Machine dependent access functions for RTC registers. + */ + +/* empty include file to satisfy the include in genrtc.c */ + +#endif /* _ASM_IA64_MC146818RTC_H */ diff --git a/include/asm-ia64/mca.h b/include/asm-ia64/mca.h new file mode 100644 index 000000000000..149ad0118455 --- /dev/null +++ b/include/asm-ia64/mca.h @@ -0,0 +1,132 @@ +/* + * File: mca.h + * Purpose: Machine check handling specific defines + * + * Copyright (C) 1999, 2004 Silicon Graphics, Inc. 
+ * Copyright (C) Vijay Chander (vijay@engr.sgi.com) + * Copyright (C) Srinivasa Thirumalachar (sprasad@engr.sgi.com) + * Copyright (C) Russ Anderson (rja@sgi.com) + */ + +#ifndef _ASM_IA64_MCA_H +#define _ASM_IA64_MCA_H + +#define IA64_MCA_STACK_SIZE 8192 + +#if !defined(__ASSEMBLY__) + +#include <linux/interrupt.h> +#include <linux/types.h> + +#include <asm/param.h> +#include <asm/sal.h> +#include <asm/processor.h> +#include <asm/mca_asm.h> + +#define IA64_MCA_RENDEZ_TIMEOUT (20 * 1000) /* value in milliseconds - 20 seconds */ + +typedef struct ia64_fptr { + unsigned long fp; + unsigned long gp; +} ia64_fptr_t; + +typedef union cmcv_reg_u { + u64 cmcv_regval; + struct { + u64 cmcr_vector : 8; + u64 cmcr_reserved1 : 4; + u64 cmcr_ignored1 : 1; + u64 cmcr_reserved2 : 3; + u64 cmcr_mask : 1; + u64 cmcr_ignored2 : 47; + } cmcv_reg_s; + +} cmcv_reg_t; + +#define cmcv_mask cmcv_reg_s.cmcr_mask +#define cmcv_vector cmcv_reg_s.cmcr_vector + +enum { + IA64_MCA_RENDEZ_CHECKIN_NOTDONE = 0x0, + IA64_MCA_RENDEZ_CHECKIN_DONE = 0x1 +}; + +/* Information maintained by the MC infrastructure */ +typedef struct ia64_mc_info_s { + u64 imi_mca_handler; + size_t imi_mca_handler_size; + u64 imi_monarch_init_handler; + size_t imi_monarch_init_handler_size; + u64 imi_slave_init_handler; + size_t imi_slave_init_handler_size; + u8 imi_rendez_checkin[NR_CPUS]; + +} ia64_mc_info_t; + +typedef struct ia64_mca_sal_to_os_state_s { + u64 imsto_os_gp; /* GP of the os registered with the SAL */ + u64 imsto_pal_proc; /* PAL_PROC entry point - physical addr */ + u64 imsto_sal_proc; /* SAL_PROC entry point - physical addr */ + u64 imsto_sal_gp; /* GP of the SAL - physical */ + u64 imsto_rendez_state; /* Rendez state information */ + u64 imsto_sal_check_ra; /* Return address in SAL_CHECK while going + * back to SAL from OS after MCA handling. + */ + u64 pal_min_state; /* from PAL in r17 */ + u64 proc_state_param; /* from PAL in r18. See SDV 2:268 11.3.2.1 */ +} ia64_mca_sal_to_os_state_t; + +enum { + IA64_MCA_CORRECTED = 0x0, /* Error has been corrected by OS_MCA */ + IA64_MCA_WARM_BOOT = -1, /* Warm boot of the system need from SAL */ + IA64_MCA_COLD_BOOT = -2, /* Cold boot of the system need from SAL */ + IA64_MCA_HALT = -3 /* System to be halted by SAL */ +}; + +enum { + IA64_MCA_SAME_CONTEXT = 0x0, /* SAL to return to same context */ + IA64_MCA_NEW_CONTEXT = -1 /* SAL to return to new context */ +}; + +typedef struct ia64_mca_os_to_sal_state_s { + u64 imots_os_status; /* OS status to SAL as to what happened + * with the MCA handling. + */ + u64 imots_sal_gp; /* GP of the SAL - physical */ + u64 imots_context; /* 0 if return to same context + 1 if return to new context */ + u64 *imots_new_min_state; /* Pointer to structure containing + * new values of registers in the min state + * save area. + */ + u64 imots_sal_check_ra; /* Return address in SAL_CHECK while going + * back to SAL from OS after MCA handling. + */ +} ia64_mca_os_to_sal_state_t; + +/* Per-CPU MCA state that is too big for normal per-CPU variables. */ + +struct ia64_mca_cpu { + u64 stack[IA64_MCA_STACK_SIZE/8]; /* MCA memory-stack */ + u64 proc_state_dump[512]; + u64 stackframe[32]; + u64 rbstore[IA64_MCA_STACK_SIZE/8]; /* MCA reg.-backing store */ + u64 init_stack[KERNEL_STACK_SIZE/8]; +} __attribute__ ((aligned(16))); + +/* Array of physical addresses of each CPU's MCA area. 
*/
+extern unsigned long __per_cpu_mca[NR_CPUS];
+
+extern void ia64_mca_init(void);
+extern void ia64_mca_cpu_init(void *);
+extern void ia64_os_mca_dispatch(void);
+extern void ia64_os_mca_dispatch_end(void);
+extern void ia64_mca_ucmc_handler(void);
+extern void ia64_monarch_init_handler(void);
+extern void ia64_slave_init_handler(void);
+extern void ia64_mca_cmc_vector_setup(void);
+extern int ia64_reg_MCA_extension(void*);
+extern void ia64_unreg_MCA_extension(void);
+
+#endif /* !__ASSEMBLY__ */
+#endif /* _ASM_IA64_MCA_H */
diff --git a/include/asm-ia64/mca_asm.h b/include/asm-ia64/mca_asm.h
new file mode 100644
index 000000000000..836953e0f91f
--- /dev/null
+++ b/include/asm-ia64/mca_asm.h
@@ -0,0 +1,312 @@
+/*
+ * File:	mca_asm.h
+ *
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) Vijay Chander (vijay@engr.sgi.com)
+ * Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com>
+ * Copyright (C) 2000 Hewlett-Packard Co.
+ * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 2002 Intel Corp.
+ * Copyright (C) 2002 Jenna Hall <jenna.s.hall@intel.com>
+ */
+#ifndef _ASM_IA64_MCA_ASM_H
+#define _ASM_IA64_MCA_ASM_H
+
+#define PSR_IC		13
+#define PSR_I		14
+#define PSR_DT		17
+#define PSR_RT		27
+#define PSR_MC		35
+#define PSR_IT		36
+#define PSR_BN		44
+
+/*
+ * This macro converts an instruction virtual address to a physical address.
+ * Right now, for simulation purposes, the virtual addresses are
+ * direct-mapped to physical addresses.
+ *	1. Lop off bits 61 thru 63 in the virtual address
+ */
+#define INST_VA_TO_PA(addr)		\
+	dep	addr	= 0, addr, 61, 3
+/*
+ * This macro converts a data virtual address to a physical address.
+ * Right now, for simulation purposes, the virtual addresses are
+ * direct-mapped to physical addresses.
+ *	1. Lop off bits 61 thru 63 in the virtual address
+ */
+#define DATA_VA_TO_PA(addr)		\
+	tpa	addr	= addr
+/*
+ * This macro converts a data physical address to a virtual address.
+ * Right now, for simulation purposes, the virtual addresses are
+ * direct-mapped to physical addresses.
+ *	1. Put 0x7 in bits 61 thru 63.
+ */
+#define DATA_PA_TO_VA(addr,temp)	\
+	mov	temp	= 0x7	;;	\
+	dep	addr	= temp, addr, 61, 3
+
+#define GET_THIS_PADDR(reg, var)		\
+	mov	reg = IA64_KR(PER_CPU_DATA);;	\
+	addl	reg = THIS_CPU(var), reg
+
+/*
+ * This macro jumps to the instruction at the given virtual address
+ * and starts execution in physical mode with all the address
+ * translations turned off.
+ *	1. Save the current psr
+ *	2. Make sure that all the upper 32 bits are off
+ *
+ *	3. Clear the interrupt enable and interrupt state collection bits
+ *	   in the psr before updating the ipsr and iip.
+ *
+ *	4. Turn off the instruction, data and rse translation bits of the psr
+ *	   and store the new value into ipsr
+ *	   Also make sure that the interrupts are disabled.
+ *	   Ensure that we are in little endian mode.
+ *	   [psr.{rt, it, dt, i, be} = 0]
+ *
+ *	5. Get the physical address corresponding to the virtual address
+ *	   of the next instruction bundle and put it in iip.
+ *	   (Using magic numbers 24 and 40 in the deposit instruction, since
+ *	    the IA64_SDK code directly maps the lower 24 bits of a virtual
+ *	    address to a physical address.)
+ *
+ *	6. Do an rfi to move the values from ipsr to psr and iip to ip.
+ */ +#define PHYSICAL_MODE_ENTER(temp1, temp2, start_addr, old_psr) \ + mov old_psr = psr; \ + ;; \ + dep old_psr = 0, old_psr, 32, 32; \ + \ + mov ar.rsc = 0 ; \ + ;; \ + srlz.d; \ + mov temp2 = ar.bspstore; \ + ;; \ + DATA_VA_TO_PA(temp2); \ + ;; \ + mov temp1 = ar.rnat; \ + ;; \ + mov ar.bspstore = temp2; \ + ;; \ + mov ar.rnat = temp1; \ + mov temp1 = psr; \ + mov temp2 = psr; \ + ;; \ + \ + dep temp2 = 0, temp2, PSR_IC, 2; \ + ;; \ + mov psr.l = temp2; \ + ;; \ + srlz.d; \ + dep temp1 = 0, temp1, 32, 32; \ + ;; \ + dep temp1 = 0, temp1, PSR_IT, 1; \ + ;; \ + dep temp1 = 0, temp1, PSR_DT, 1; \ + ;; \ + dep temp1 = 0, temp1, PSR_RT, 1; \ + ;; \ + dep temp1 = 0, temp1, PSR_I, 1; \ + ;; \ + dep temp1 = 0, temp1, PSR_IC, 1; \ + ;; \ + dep temp1 = -1, temp1, PSR_MC, 1; \ + ;; \ + mov cr.ipsr = temp1; \ + ;; \ + LOAD_PHYSICAL(p0, temp2, start_addr); \ + ;; \ + mov cr.iip = temp2; \ + mov cr.ifs = r0; \ + DATA_VA_TO_PA(sp); \ + DATA_VA_TO_PA(gp); \ + ;; \ + srlz.i; \ + ;; \ + nop 1; \ + nop 2; \ + nop 1; \ + nop 2; \ + rfi; \ + ;; + +/* + * This macro jumps to the instruction at the given virtual address + * and starts execution in virtual mode with all the address + * translations turned on. + * 1. Get the old saved psr + * + * 2. Clear the interrupt state collection bit in the current psr. + * + * 3. Set the instruction translation bit back in the old psr + * Note we have to do this since we are right now saving only the + * lower 32-bits of old psr.(Also the old psr has the data and + * rse translation bits on) + * + * 4. Set ipsr to this old_psr with "it" bit set and "bn" = 1. + * + * 5. Reset the current thread pointer (r13). + * + * 6. Set iip to the virtual address of the next instruction bundle. + * + * 7. Do an rfi to move ipsr to psr and iip to ip. + */ + +#define VIRTUAL_MODE_ENTER(temp1, temp2, start_addr, old_psr) \ + mov temp2 = psr; \ + ;; \ + mov old_psr = temp2; \ + ;; \ + dep temp2 = 0, temp2, PSR_IC, 2; \ + ;; \ + mov psr.l = temp2; \ + mov ar.rsc = 0; \ + ;; \ + srlz.d; \ + mov r13 = ar.k6; \ + mov temp2 = ar.bspstore; \ + ;; \ + DATA_PA_TO_VA(temp2,temp1); \ + ;; \ + mov temp1 = ar.rnat; \ + ;; \ + mov ar.bspstore = temp2; \ + ;; \ + mov ar.rnat = temp1; \ + ;; \ + mov temp1 = old_psr; \ + ;; \ + mov temp2 = 1; \ + ;; \ + dep temp1 = temp2, temp1, PSR_IC, 1; \ + ;; \ + dep temp1 = temp2, temp1, PSR_IT, 1; \ + ;; \ + dep temp1 = temp2, temp1, PSR_DT, 1; \ + ;; \ + dep temp1 = temp2, temp1, PSR_RT, 1; \ + ;; \ + dep temp1 = temp2, temp1, PSR_BN, 1; \ + ;; \ + \ + mov cr.ipsr = temp1; \ + movl temp2 = start_addr; \ + ;; \ + mov cr.iip = temp2; \ + ;; \ + DATA_PA_TO_VA(sp, temp1); \ + DATA_PA_TO_VA(gp, temp2); \ + srlz.i; \ + ;; \ + nop 1; \ + nop 2; \ + nop 1; \ + rfi \ + ;; + +/* + * The following offsets capture the order in which the + * RSE related registers from the old context are + * saved onto the new stack frame. + * + * +-----------------------+ + * |NDIRTY [BSP - BSPSTORE]| + * +-----------------------+ + * | RNAT | + * +-----------------------+ + * | BSPSTORE | + * +-----------------------+ + * | IFS | + * +-----------------------+ + * | PFS | + * +-----------------------+ + * | RSC | + * +-----------------------+ <-------- Bottom of new stack frame + */ +#define rse_rsc_offset 0 +#define rse_pfs_offset (rse_rsc_offset+0x08) +#define rse_ifs_offset (rse_pfs_offset+0x08) +#define rse_bspstore_offset (rse_ifs_offset+0x08) +#define rse_rnat_offset (rse_bspstore_offset+0x08) +#define rse_ndirty_offset (rse_rnat_offset+0x08) + +/* + * rse_switch_context + * + * 1. 
Save old RSC onto the new stack frame + * 2. Save PFS onto new stack frame + * 3. Cover the old frame and start a new frame. + * 4. Save IFS onto new stack frame + * 5. Save the old BSPSTORE on the new stack frame + * 6. Save the old RNAT on the new stack frame + * 7. Write BSPSTORE with the new backing store pointer + * 8. Read and save the new BSP to calculate the #dirty registers + * NOTE: Look at pages 11-10, 11-11 in PRM Vol 2 + */ +#define rse_switch_context(temp,p_stackframe,p_bspstore) \ + ;; \ + mov temp=ar.rsc;; \ + st8 [p_stackframe]=temp,8;; \ + mov temp=ar.pfs;; \ + st8 [p_stackframe]=temp,8; \ + cover ;; \ + mov temp=cr.ifs;; \ + st8 [p_stackframe]=temp,8;; \ + mov temp=ar.bspstore;; \ + st8 [p_stackframe]=temp,8;; \ + mov temp=ar.rnat;; \ + st8 [p_stackframe]=temp,8; \ + mov ar.bspstore=p_bspstore;; \ + mov temp=ar.bsp;; \ + sub temp=temp,p_bspstore;; \ + st8 [p_stackframe]=temp,8;; + +/* + * rse_return_context + * 1. Allocate a zero-sized frame + * 2. Store the number of dirty registers in the RSC.loadrs field + * 3. Issue a loadrs to ensure that any registers from the interrupted + * context which were saved on the new stack frame have been loaded + * back into the stacked registers + * 4. Restore BSPSTORE + * 5. Restore RNAT + * 6. Restore PFS + * 7. Restore IFS + * 8. Restore RSC + * 9. Issue an RFI + */ +#define rse_return_context(psr_mask_reg,temp,p_stackframe) \ + ;; \ + alloc temp=ar.pfs,0,0,0,0; \ + add p_stackframe=rse_ndirty_offset,p_stackframe;; \ + ld8 temp=[p_stackframe];; \ + shl temp=temp,16;; \ + mov ar.rsc=temp;; \ + loadrs;; \ + add p_stackframe=-rse_ndirty_offset+rse_bspstore_offset,p_stackframe;;\ + ld8 temp=[p_stackframe];; \ + mov ar.bspstore=temp;; \ + add p_stackframe=-rse_bspstore_offset+rse_rnat_offset,p_stackframe;;\ + ld8 temp=[p_stackframe];; \ + mov ar.rnat=temp;; \ + add p_stackframe=-rse_rnat_offset+rse_pfs_offset,p_stackframe;; \ + ld8 temp=[p_stackframe];; \ + mov ar.pfs=temp;; \ + add p_stackframe=-rse_pfs_offset+rse_ifs_offset,p_stackframe;; \ + ld8 temp=[p_stackframe];; \ + mov cr.ifs=temp;; \ + add p_stackframe=-rse_ifs_offset+rse_rsc_offset,p_stackframe;; \ + ld8 temp=[p_stackframe];; \ + mov ar.rsc=temp ; \ + mov temp=psr;; \ + or temp=temp,psr_mask_reg;; \ + mov cr.ipsr=temp;; \ + mov temp=ip;; \ + add temp=0x30,temp;; \ + mov cr.iip=temp;; \ + srlz.i;; \ + rfi;; + +#endif /* _ASM_IA64_MCA_ASM_H */ diff --git a/include/asm-ia64/meminit.h b/include/asm-ia64/meminit.h new file mode 100644 index 000000000000..1590dc65b30b --- /dev/null +++ b/include/asm-ia64/meminit.h @@ -0,0 +1,60 @@ +#ifndef meminit_h +#define meminit_h + +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details.
+ */ + +#include <linux/config.h> + +/* + * Entries defined so far: + * - boot param structure itself + * - memory map + * - initrd (optional) + * - command line string + * - kernel code & data + * + * More could be added if necessary + */ +#define IA64_MAX_RSVD_REGIONS 5 + +struct rsvd_region { + unsigned long start; /* virtual address of beginning of element */ + unsigned long end; /* virtual address of end of element + 1 */ +}; + +extern struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1]; +extern int num_rsvd_regions; + +extern void find_memory (void); +extern void reserve_memory (void); +extern void find_initrd (void); +extern int filter_rsvd_memory (unsigned long start, unsigned long end, void *arg); + +/* + * For rounding an address to the next IA64_GRANULE_SIZE or order + */ +#define GRANULEROUNDDOWN(n) ((n) & ~(IA64_GRANULE_SIZE-1)) +#define GRANULEROUNDUP(n) (((n)+IA64_GRANULE_SIZE-1) & ~(IA64_GRANULE_SIZE-1)) +#define ORDERROUNDDOWN(n) ((n) & ~((PAGE_SIZE<<MAX_ORDER)-1)) + +#ifdef CONFIG_DISCONTIGMEM + extern void call_pernode_memory (unsigned long start, unsigned long len, void *func); +#else +# define call_pernode_memory(start, len, func) (*func)(start, len, 0) +#endif + +#define IGNORE_PFN0 1 /* XXX fix me: ignore pfn 0 until TLB miss handler is updated... */ + +#ifdef CONFIG_VIRTUAL_MEM_MAP +# define LARGE_GAP 0x40000000 /* Use virtual mem map if hole is > than this */ + extern unsigned long vmalloc_end; + extern struct page *vmem_map; + extern int find_largest_hole (u64 start, u64 end, void *arg); + extern int create_mem_map_page_table (u64 start, u64 end, void *arg); +#endif + +#endif /* meminit_h */ diff --git a/include/asm-ia64/mman.h b/include/asm-ia64/mman.h new file mode 100644 index 000000000000..1c0a73af1461 --- /dev/null +++ b/include/asm-ia64/mman.h @@ -0,0 +1,51 @@ +#ifndef _ASM_IA64_MMAN_H +#define _ASM_IA64_MMAN_H + +/* + * Based on <asm-i386/mman.h>. 
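+ * + * The values below are part of the user/kernel ABI; as an illustrative sketch (not in the original header), a typical anonymous mapping from user space uses them as + * + * p = mmap(NULL, len, PROT_READ | PROT_WRITE, + * MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + * + * with MAP_ANON accepted as a BSD-compatible alias.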
+ * + * Modified 1998-2000, 2002 + * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co + */ + +#define PROT_READ 0x1 /* page can be read */ +#define PROT_WRITE 0x2 /* page can be written */ +#define PROT_EXEC 0x4 /* page can be executed */ +#define PROT_SEM 0x8 /* page may be used for atomic ops */ +#define PROT_NONE 0x0 /* page can not be accessed */ +#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ +#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */ + +#define MAP_SHARED 0x01 /* Share changes */ +#define MAP_PRIVATE 0x02 /* Changes are private */ +#define MAP_TYPE 0x0f /* Mask for type of mapping */ +#define MAP_FIXED 0x10 /* Interpret addr exactly */ +#define MAP_ANONYMOUS 0x20 /* don't use a file */ + +#define MAP_GROWSDOWN 0x00100 /* stack-like segment */ +#define MAP_GROWSUP 0x00200 /* register stack-like segment */ +#define MAP_DENYWRITE 0x00800 /* ETXTBSY */ +#define MAP_EXECUTABLE 0x01000 /* mark it as an executable */ +#define MAP_LOCKED 0x02000 /* pages are locked */ +#define MAP_NORESERVE 0x04000 /* don't check for reservations */ +#define MAP_POPULATE 0x08000 /* populate (prefault) pagetables */ +#define MAP_NONBLOCK 0x10000 /* do not block on IO */ + +#define MS_ASYNC 1 /* sync memory asynchronously */ +#define MS_INVALIDATE 2 /* invalidate the caches */ +#define MS_SYNC 4 /* synchronous memory sync */ + +#define MCL_CURRENT 1 /* lock all current mappings */ +#define MCL_FUTURE 2 /* lock all future mappings */ + +#define MADV_NORMAL 0x0 /* default page-in behavior */ +#define MADV_RANDOM 0x1 /* page-in minimum required */ +#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */ +#define MADV_WILLNEED 0x3 /* pre-fault pages */ +#define MADV_DONTNEED 0x4 /* discard these pages */ + +/* compatibility flags */ +#define MAP_ANON MAP_ANONYMOUS +#define MAP_FILE 0 + +#endif /* _ASM_IA64_MMAN_H */ diff --git a/include/asm-ia64/mmu.h b/include/asm-ia64/mmu.h new file mode 100644 index 000000000000..ae1525352a25 --- /dev/null +++ b/include/asm-ia64/mmu.h @@ -0,0 +1,11 @@ +#ifndef __MMU_H +#define __MMU_H + +/* + * Type for a context number. We declare it volatile to ensure proper ordering when it's + * accessed outside of spinlock'd critical sections (e.g., as done in activate_mm() and + * init_new_context()). + */ +typedef volatile unsigned long mm_context_t; + +#endif diff --git a/include/asm-ia64/mmu_context.h b/include/asm-ia64/mmu_context.h new file mode 100644 index 000000000000..0096e7e05012 --- /dev/null +++ b/include/asm-ia64/mmu_context.h @@ -0,0 +1,170 @@ +#ifndef _ASM_IA64_MMU_CONTEXT_H +#define _ASM_IA64_MMU_CONTEXT_H + +/* + * Copyright (C) 1998-2002 Hewlett-Packard Co + * David Mosberger-Tang <davidm@hpl.hp.com> + */ + +/* + * Routines to manage the allocation of task context numbers. Task context numbers are + * used to reduce or eliminate the need to perform TLB flushes due to context switches. + * Context numbers are implemented using ia-64 region ids. Since the IA-64 TLB does not + * consider the region number when performing a TLB lookup, we need to assign a unique + * region id to each region in a process. We use the least significant three bits in a + * region id for this purpose. 
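+ * + * Concretely, with the ia64_rid() encoding defined below (an illustrative sketch; `ctx' is a task's context number and `addr' a user address): + * + * rid = (ctx << 3) | (addr >> 61); + * + * so each of the eight 2^61-byte regions of the address space gets its own region id, all derived from the same context number.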
+ */ + +#define IA64_REGION_ID_KERNEL 0 /* the kernel's region id (tlb.c depends on this being 0) */ + +#define ia64_rid(ctx,addr) (((ctx) << 3) | (addr >> 61)) + +# ifndef __ASSEMBLY__ + +#include <linux/compiler.h> +#include <linux/percpu.h> +#include <linux/sched.h> +#include <linux/spinlock.h> + +#include <asm/processor.h> + +struct ia64_ctx { + spinlock_t lock; + unsigned int next; /* next context number to use */ + unsigned int limit; /* next >= limit => must call wrap_mmu_context() */ + unsigned int max_ctx; /* max. context value supported by all CPUs */ +}; + +extern struct ia64_ctx ia64_ctx; +DECLARE_PER_CPU(u8, ia64_need_tlb_flush); + +extern void wrap_mmu_context (struct mm_struct *mm); + +static inline void +enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk) +{ +} + +/* + * When the context counter wraps around all TLBs need to be flushed because an old + * context number might have been reused. This is signalled by the ia64_need_tlb_flush + * per-CPU variable, which is checked in the routine below. Called by activate_mm(). + * <efocht@ess.nec.de> + */ +static inline void +delayed_tlb_flush (void) +{ + extern void local_flush_tlb_all (void); + + if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) { + local_flush_tlb_all(); + __ia64_per_cpu_var(ia64_need_tlb_flush) = 0; + } +} + +static inline mm_context_t +get_mmu_context (struct mm_struct *mm) +{ + unsigned long flags; + mm_context_t context = mm->context; + + if (context) + return context; + + spin_lock_irqsave(&ia64_ctx.lock, flags); + { + /* re-check, now that we've got the lock: */ + context = mm->context; + if (context == 0) { + cpus_clear(mm->cpu_vm_mask); + if (ia64_ctx.next >= ia64_ctx.limit) + wrap_mmu_context(mm); + mm->context = context = ia64_ctx.next++; + } + } + spin_unlock_irqrestore(&ia64_ctx.lock, flags); + return context; +} + +/* + * Initialize context number to some sane value. MM is guaranteed to be a brand-new + * address-space, so no TLB flushing is needed, ever. + */ +static inline int +init_new_context (struct task_struct *p, struct mm_struct *mm) +{ + mm->context = 0; + return 0; +} + +static inline void +destroy_context (struct mm_struct *mm) +{ + /* Nothing to do. 
*/ +} + +static inline void +reload_context (mm_context_t context) +{ + unsigned long rid; + unsigned long rid_incr = 0; + unsigned long rr0, rr1, rr2, rr3, rr4, old_rr4; + + old_rr4 = ia64_get_rr(0x8000000000000000UL); + rid = context << 3; /* make space for encoding the region number */ + rid_incr = 1 << 8; + + /* encode the region id, preferred page size, and VHPT enable bit: */ + rr0 = (rid << 8) | (PAGE_SHIFT << 2) | 1; + rr1 = rr0 + 1*rid_incr; + rr2 = rr0 + 2*rid_incr; + rr3 = rr0 + 3*rid_incr; + rr4 = rr0 + 4*rid_incr; +#ifdef CONFIG_HUGETLB_PAGE + rr4 = (rr4 & (~(0xfcUL))) | (old_rr4 & 0xfc); +#endif + + ia64_set_rr(0x0000000000000000UL, rr0); + ia64_set_rr(0x2000000000000000UL, rr1); + ia64_set_rr(0x4000000000000000UL, rr2); + ia64_set_rr(0x6000000000000000UL, rr3); + ia64_set_rr(0x8000000000000000UL, rr4); + ia64_srlz_i(); /* srlz.i implies srlz.d */ +} + +static inline void +activate_context (struct mm_struct *mm) +{ + mm_context_t context; + + do { + context = get_mmu_context(mm); + if (!cpu_isset(smp_processor_id(), mm->cpu_vm_mask)) + cpu_set(smp_processor_id(), mm->cpu_vm_mask); + reload_context(context); + /* in the unlikely event of a TLB-flush by another thread, redo the load: */ + } while (unlikely(context != mm->context)); +} + +#define deactivate_mm(tsk,mm) do { } while (0) + +/* + * Switch from address space PREV to address space NEXT. + */ +static inline void +activate_mm (struct mm_struct *prev, struct mm_struct *next) +{ + delayed_tlb_flush(); + + /* + * We may get interrupts here, but that's OK because interrupt handlers cannot + * touch user-space. + */ + ia64_set_kr(IA64_KR_PT_BASE, __pa(next->pgd)); + activate_context(next); +} + +#define switch_mm(prev_mm,next_mm,next_task) activate_mm(prev_mm, next_mm) + +# endif /* ! __ASSEMBLY__ */ +#endif /* _ASM_IA64_MMU_CONTEXT_H */ diff --git a/include/asm-ia64/mmzone.h b/include/asm-ia64/mmzone.h new file mode 100644 index 000000000000..9491dacc89cf --- /dev/null +++ b/include/asm-ia64/mmzone.h @@ -0,0 +1,32 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2000,2003 Silicon Graphics, Inc. All rights reserved. + * Copyright (c) 2002 NEC Corp. + * Copyright (c) 2002 Erich Focht <efocht@ess.nec.de> + * Copyright (c) 2002 Kimio Suganuma <k-suganuma@da.jp.nec.com> + */ +#ifndef _ASM_IA64_MMZONE_H +#define _ASM_IA64_MMZONE_H + +#include <linux/numa.h> +#include <asm/page.h> +#include <asm/meminit.h> + +#ifdef CONFIG_DISCONTIGMEM + +#ifdef CONFIG_IA64_DIG /* DIG systems are small */ +# define MAX_PHYSNODE_ID 8 +# define NR_NODE_MEMBLKS (MAX_NUMNODES * 8) +#else /* sn2 is the biggest case, so we use that if !DIG */ +# define MAX_PHYSNODE_ID 2048 +# define NR_NODE_MEMBLKS (MAX_NUMNODES * 4) +#endif + +#else /* CONFIG_DISCONTIGMEM */ +# define NR_NODE_MEMBLKS (MAX_NUMNODES * 4) +#endif /* CONFIG_DISCONTIGMEM */ + +#endif /* _ASM_IA64_MMZONE_H */ diff --git a/include/asm-ia64/module.h b/include/asm-ia64/module.h new file mode 100644 index 000000000000..85c82bd819f2 --- /dev/null +++ b/include/asm-ia64/module.h @@ -0,0 +1,35 @@ +#ifndef _ASM_IA64_MODULE_H +#define _ASM_IA64_MODULE_H + +/* + * IA-64-specific support for kernel module loader. 
+ * + * Copyright (C) 2003 Hewlett-Packard Co + * David Mosberger-Tang <davidm@hpl.hp.com> + */ + +struct elf64_shdr; /* forward declaration */ + +struct mod_arch_specific { + struct elf64_shdr *core_plt; /* core PLT section */ + struct elf64_shdr *init_plt; /* init PLT section */ + struct elf64_shdr *got; /* global offset table */ + struct elf64_shdr *opd; /* official procedure descriptors */ + struct elf64_shdr *unwind; /* unwind-table section */ + unsigned long gp; /* global-pointer for module */ + + void *core_unw_table; /* core unwind-table cookie returned by unwinder */ + void *init_unw_table; /* init unwind-table cookie returned by unwinder */ + unsigned int next_got_entry; /* index of next available got entry */ +}; + +#define Elf_Shdr Elf64_Shdr +#define Elf_Sym Elf64_Sym +#define Elf_Ehdr Elf64_Ehdr + +#define MODULE_PROC_FAMILY "ia64" +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY + +#define ARCH_SHF_SMALL SHF_IA_64_SHORT + +#endif /* _ASM_IA64_MODULE_H */ diff --git a/include/asm-ia64/msgbuf.h b/include/asm-ia64/msgbuf.h new file mode 100644 index 000000000000..6c64c0d2aae1 --- /dev/null +++ b/include/asm-ia64/msgbuf.h @@ -0,0 +1,27 @@ +#ifndef _ASM_IA64_MSGBUF_H +#define _ASM_IA64_MSGBUF_H + +/* + * The msqid64_ds structure for IA-64 architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. + * + * Pad space is left for: + * - 2 miscellaneous 64-bit values + */ + +struct msqid64_ds { + struct ipc64_perm msg_perm; + __kernel_time_t msg_stime; /* last msgsnd time */ + __kernel_time_t msg_rtime; /* last msgrcv time */ + __kernel_time_t msg_ctime; /* last change time */ + unsigned long msg_cbytes; /* current number of bytes on queue */ + unsigned long msg_qnum; /* number of messages in queue */ + unsigned long msg_qbytes; /* max number of bytes on queue */ + __kernel_pid_t msg_lspid; /* pid of last msgsnd */ + __kernel_pid_t msg_lrpid; /* last receive pid */ + unsigned long __unused1; + unsigned long __unused2; +}; + +#endif /* _ASM_IA64_MSGBUF_H */ diff --git a/include/asm-ia64/msi.h b/include/asm-ia64/msi.h new file mode 100644 index 000000000000..60f2137f9278 --- /dev/null +++ b/include/asm-ia64/msi.h @@ -0,0 +1,20 @@ +/* + * Copyright (C) 2003-2004 Intel + * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com) + */ + +#ifndef ASM_MSI_H +#define ASM_MSI_H + +#define NR_VECTORS NR_IRQS +#define FIRST_DEVICE_VECTOR IA64_FIRST_DEVICE_VECTOR +#define LAST_DEVICE_VECTOR IA64_LAST_DEVICE_VECTOR +static inline void set_intr_gate (int nr, void *func) {} +#define IO_APIC_VECTOR(irq) (irq) +#define ack_APIC_irq ia64_eoi +#define cpu_mask_to_apicid(mask) cpu_physical_id(first_cpu(mask)) +#define MSI_DEST_MODE MSI_PHYSICAL_MODE +#define MSI_TARGET_CPU ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff) +#define MSI_TARGET_CPU_SHIFT 4 + +#endif /* ASM_MSI_H */ diff --git a/include/asm-ia64/namei.h b/include/asm-ia64/namei.h new file mode 100644 index 000000000000..78e768079083 --- /dev/null +++ b/include/asm-ia64/namei.h @@ -0,0 +1,25 @@ +#ifndef _ASM_IA64_NAMEI_H +#define _ASM_IA64_NAMEI_H + +/* + * Modified 1998, 1999, 2001 + * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co + */ + +#include <asm/ptrace.h> +#include <asm/system.h> + +#define EMUL_PREFIX_LINUX_IA32 "/emul/ia32-linux/" + +static inline char * +__emul_prefix (void) +{ + switch (current->personality) { + case PER_LINUX32: + return EMUL_PREFIX_LINUX_IA32; + default: + return NULL; + } +} + +#endif /* _ASM_IA64_NAMEI_H */ diff --git
a/include/asm-ia64/nodedata.h b/include/asm-ia64/nodedata.h new file mode 100644 index 000000000000..6b0f3ed89b7e --- /dev/null +++ b/include/asm-ia64/nodedata.h @@ -0,0 +1,52 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2000 Silicon Graphics, Inc. All rights reserved. + * Copyright (c) 2002 NEC Corp. + * Copyright (c) 2002 Erich Focht <efocht@ess.nec.de> + * Copyright (c) 2002 Kimio Suganuma <k-suganuma@da.jp.nec.com> + */ +#ifndef _ASM_IA64_NODEDATA_H +#define _ASM_IA64_NODEDATA_H + +#include <linux/config.h> +#include <linux/numa.h> + +#include <asm/percpu.h> +#include <asm/mmzone.h> + +#ifdef CONFIG_DISCONTIGMEM + +/* + * Node Data. One of these structures is located on each node of a NUMA system. + */ + +struct pglist_data; +struct ia64_node_data { + short active_cpu_count; + short node; + struct pglist_data *pg_data_ptrs[MAX_NUMNODES]; +}; + + +/* + * Return a pointer to the node_data structure for the executing cpu. + */ +#define local_node_data (local_cpu_data->node_data) + +/* + * Given a node id, return a pointer to the pg_data_t for the node. + * + * NODE_DATA - should be used in all code not related to system + * initialization. It uses pernode data structures to minimize + * offnode memory references. However, these structure are not + * present during boot. This macro can be used once cpu_init + * completes. + */ +#define NODE_DATA(nid) (local_node_data->pg_data_ptrs[nid]) + +#endif /* CONFIG_DISCONTIGMEM */ + +#endif /* _ASM_IA64_NODEDATA_H */ diff --git a/include/asm-ia64/numa.h b/include/asm-ia64/numa.h new file mode 100644 index 000000000000..3ae128fe0823 --- /dev/null +++ b/include/asm-ia64/numa.h @@ -0,0 +1,74 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * This file contains NUMA specific prototypes and definitions. + * + * 2002/08/05 Erich Focht <efocht@ess.nec.de> + * + */ +#ifndef _ASM_IA64_NUMA_H +#define _ASM_IA64_NUMA_H + +#include <linux/config.h> + +#ifdef CONFIG_NUMA + +#include <linux/cache.h> +#include <linux/cpumask.h> +#include <linux/numa.h> +#include <linux/smp.h> +#include <linux/threads.h> + +#include <asm/mmzone.h> + +extern u8 cpu_to_node_map[NR_CPUS] __cacheline_aligned; +extern cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned; + +/* Stuff below this line could be architecture independent */ + +extern int num_node_memblks; /* total number of memory chunks */ + +/* + * List of node memory chunks. Filled when parsing SRAT table to + * obtain information about memory nodes. +*/ + +struct node_memblk_s { + unsigned long start_paddr; + unsigned long size; + int nid; /* which logical node contains this chunk? */ + int bank; /* which mem bank on this node */ +}; + +struct node_cpuid_s { + u16 phys_id; /* id << 8 | eid */ + int nid; /* logical node containing this CPU */ +}; + +extern struct node_memblk_s node_memblk[NR_NODE_MEMBLKS]; +extern struct node_cpuid_s node_cpuid[NR_CPUS]; + +/* + * ACPI 2.0 SLIT (System Locality Information Table) + * http://devresource.hp.com/devresource/Docs/TechPapers/IA64/slit.pdf + * + * This is a matrix with "distances" between nodes, they should be + * proportional to the memory access latency ratios. 
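+ * + * The matrix is stored row-major; as an illustrative sketch (the node numbers are assumed), the relative cost for node 2 to reach node 5's memory via the node_distance() macro below is + * + * d = numa_slit[2 * num_online_nodes() + 5]; + * + * where, per the ACPI convention, a value of 10 denotes local access.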
+ */ + +extern u8 numa_slit[MAX_NUMNODES * MAX_NUMNODES]; +#define node_distance(from,to) (numa_slit[(from) * num_online_nodes() + (to)]) + +extern int paddr_to_nid(unsigned long paddr); + +#define local_nodeid (cpu_to_node_map[smp_processor_id()]) + +#else /* !CONFIG_NUMA */ + +#define paddr_to_nid(addr) 0 + +#endif /* CONFIG_NUMA */ + +#endif /* _ASM_IA64_NUMA_H */ diff --git a/include/asm-ia64/numnodes.h b/include/asm-ia64/numnodes.h new file mode 100644 index 000000000000..21cff4da5485 --- /dev/null +++ b/include/asm-ia64/numnodes.h @@ -0,0 +1,15 @@ +#ifndef _ASM_MAX_NUMNODES_H +#define _ASM_MAX_NUMNODES_H + +#ifdef CONFIG_IA64_DIG +/* Max 8 Nodes */ +#define NODES_SHIFT 3 +#elif defined(CONFIG_IA64_HP_ZX1) || defined(CONFIG_IA64_HP_ZX1_SWIOTLB) +/* Max 32 Nodes */ +#define NODES_SHIFT 5 +#elif defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC) +/* Max 256 Nodes */ +#define NODES_SHIFT 8 +#endif + +#endif /* _ASM_MAX_NUMNODES_H */ diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h new file mode 100644 index 000000000000..24aab801a8ca --- /dev/null +++ b/include/asm-ia64/page.h @@ -0,0 +1,207 @@ +#ifndef _ASM_IA64_PAGE_H +#define _ASM_IA64_PAGE_H +/* + * Pagetable related stuff. + * + * Copyright (C) 1998, 1999, 2002 Hewlett-Packard Co + * David Mosberger-Tang <davidm@hpl.hp.com> + */ + +#include <linux/config.h> + +#include <asm/intrinsics.h> +#include <asm/types.h> + +/* + * PAGE_SHIFT determines the actual kernel page size. + */ +#if defined(CONFIG_IA64_PAGE_SIZE_4KB) +# define PAGE_SHIFT 12 +#elif defined(CONFIG_IA64_PAGE_SIZE_8KB) +# define PAGE_SHIFT 13 +#elif defined(CONFIG_IA64_PAGE_SIZE_16KB) +# define PAGE_SHIFT 14 +#elif defined(CONFIG_IA64_PAGE_SIZE_64KB) +# define PAGE_SHIFT 16 +#else +# error Unsupported page size! +#endif + +#define PAGE_SIZE (__IA64_UL_CONST(1) << PAGE_SHIFT) +#define PAGE_MASK (~(PAGE_SIZE - 1)) +#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK) + +#define PERCPU_PAGE_SHIFT 16 /* log2() of max. size of per-CPU area */ +#define PERCPU_PAGE_SIZE (__IA64_UL_CONST(1) << PERCPU_PAGE_SHIFT) + +#define RGN_MAP_LIMIT ((1UL << (4*PAGE_SHIFT - 12)) - PAGE_SIZE) /* per region addr limit */ + +#ifdef CONFIG_HUGETLB_PAGE +# define REGION_HPAGE (4UL) /* note: this is hardcoded in reload_context()!*/ +# define REGION_SHIFT 61 +# define HPAGE_REGION_BASE (REGION_HPAGE << REGION_SHIFT) +# define HPAGE_SHIFT hpage_shift +# define HPAGE_SHIFT_DEFAULT 28 /* check ia64 SDM for architecture supported size */ +# define HPAGE_SIZE (__IA64_UL_CONST(1) << HPAGE_SHIFT) +# define HPAGE_MASK (~(HPAGE_SIZE - 1)) + +# define HAVE_ARCH_HUGETLB_UNMAPPED_AREA +# define ARCH_HAS_HUGEPAGE_ONLY_RANGE +#endif /* CONFIG_HUGETLB_PAGE */ + +#ifdef __ASSEMBLY__ +# define __pa(x) ((x) - PAGE_OFFSET) +# define __va(x) ((x) + PAGE_OFFSET) +#else /* !__ASSEMBLY */ +# ifdef __KERNEL__ +# define STRICT_MM_TYPECHECKS + +extern void clear_page (void *page); +extern void copy_page (void *to, void *from); + +/* + * clear_user_page() and copy_user_page() can't be inline functions because + * flush_dcache_page() can't be defined until later... 
+ */ +#define clear_user_page(addr, vaddr, page) \ +do { \ + clear_page(addr); \ + flush_dcache_page(page); \ +} while (0) + +#define copy_user_page(to, from, vaddr, page) \ +do { \ + copy_page((to), (from)); \ + flush_dcache_page(page); \ +} while (0) + + +#define alloc_zeroed_user_highpage(vma, vaddr) \ +({ \ + struct page *page = alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr); \ + if (page) \ + flush_dcache_page(page); \ + page; \ +}) + +#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE + +#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) + +#ifdef CONFIG_VIRTUAL_MEM_MAP +extern int ia64_pfn_valid (unsigned long pfn); +#else +# define ia64_pfn_valid(pfn) 1 +#endif + +#ifndef CONFIG_DISCONTIGMEM +# define pfn_valid(pfn) (((pfn) < max_mapnr) && ia64_pfn_valid(pfn)) +# define page_to_pfn(page) ((unsigned long) (page - mem_map)) +# define pfn_to_page(pfn) (mem_map + (pfn)) +#else +extern struct page *vmem_map; +extern unsigned long max_low_pfn; +# define pfn_valid(pfn) (((pfn) < max_low_pfn) && ia64_pfn_valid(pfn)) +# define page_to_pfn(page) ((unsigned long) (page - vmem_map)) +# define pfn_to_page(pfn) (vmem_map + (pfn)) +#endif + +#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) +#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) + +typedef union ia64_va { + struct { + unsigned long off : 61; /* intra-region offset */ + unsigned long reg : 3; /* region number */ + } f; + unsigned long l; + void *p; +} ia64_va; + +/* + * Note: These macros depend on the fact that PAGE_OFFSET has all + * region bits set to 1 and all other bits set to zero. They are + * expressed in this way to ensure they result in a single "dep" + * instruction. + */ +#define __pa(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;}) +#define __va(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;}) + +#define REGION_NUMBER(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg;}) +#define REGION_OFFSET(x) ({ia64_va _v; _v.l = (long) (x); _v.f.off;}) + +#define REGION_SIZE REGION_NUMBER(1) +#define REGION_KERNEL 7 + +#ifdef CONFIG_HUGETLB_PAGE +# define htlbpage_to_page(x) (((unsigned long) REGION_NUMBER(x) << 61) \ + | (REGION_OFFSET(x) >> (HPAGE_SHIFT-PAGE_SHIFT))) +# define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) +# define is_hugepage_only_range(mm, addr, len) \ + (REGION_NUMBER(addr) == REGION_HPAGE && \ + REGION_NUMBER((addr)+(len)) == REGION_HPAGE) +extern unsigned int hpage_shift; +#endif + +static __inline__ int +get_order (unsigned long size) +{ + long double d = size - 1; + long order; + + order = ia64_getf_exp(d); + order = order - PAGE_SHIFT - 0xffff + 1; + if (order < 0) + order = 0; + return order; +} + +# endif /* __KERNEL__ */ +#endif /* !__ASSEMBLY__ */ + +#ifdef STRICT_MM_TYPECHECKS + /* + * These are used to make use of C type-checking.. + */ + typedef struct { unsigned long pte; } pte_t; + typedef struct { unsigned long pmd; } pmd_t; + typedef struct { unsigned long pgd; } pgd_t; + typedef struct { unsigned long pgprot; } pgprot_t; + +# define pte_val(x) ((x).pte) +# define pmd_val(x) ((x).pmd) +# define pgd_val(x) ((x).pgd) +# define pgprot_val(x) ((x).pgprot) + +# define __pte(x) ((pte_t) { (x) } ) +# define __pgprot(x) ((pgprot_t) { (x) } ) + +#else /* !STRICT_MM_TYPECHECKS */ + /* + * .. 
while these make it easier on the compiler + */ +# ifndef __ASSEMBLY__ + typedef unsigned long pte_t; + typedef unsigned long pmd_t; + typedef unsigned long pgd_t; + typedef unsigned long pgprot_t; +# endif + +# define pte_val(x) (x) +# define pmd_val(x) (x) +# define pgd_val(x) (x) +# define pgprot_val(x) (x) + +# define __pte(x) (x) +# define __pgd(x) (x) +# define __pgprot(x) (x) +#endif /* !STRICT_MM_TYPECHECKS */ + +#define PAGE_OFFSET __IA64_UL_CONST(0xe000000000000000) + +#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \ + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC | \ + (((current->personality & READ_IMPLIES_EXEC) != 0) \ + ? VM_EXEC : 0)) + +#endif /* _ASM_IA64_PAGE_H */ diff --git a/include/asm-ia64/pal.h b/include/asm-ia64/pal.h new file mode 100644 index 000000000000..5dd477ffb88e --- /dev/null +++ b/include/asm-ia64/pal.h @@ -0,0 +1,1564 @@ +#ifndef _ASM_IA64_PAL_H +#define _ASM_IA64_PAL_H + +/* + * Processor Abstraction Layer definitions. + * + * This is based on Intel IA-64 Architecture Software Developer's Manual rev 1.0 + * chapter 11 IA-64 Processor Abstraction Layer + * + * Copyright (C) 1998-2001 Hewlett-Packard Co + * David Mosberger-Tang <davidm@hpl.hp.com> + * Stephane Eranian <eranian@hpl.hp.com> + * Copyright (C) 1999 VA Linux Systems + * Copyright (C) 1999 Walt Drummond <drummond@valinux.com> + * Copyright (C) 1999 Srinivasa Prasad Thirumalachar <sprasad@sprasad.engr.sgi.com> + * + * 99/10/01 davidm Make sure we pass zero for reserved parameters. + * 00/03/07 davidm Updated pal_cache_flush() to be in sync with PAL v2.6. + * 00/03/23 cfleck Modified processor min-state save area to match updated PAL & SAL info + * 00/05/24 eranian Updated to latest PAL spec, fix structures bugs, added + * 00/05/25 eranian Support for stack calls, and static physical calls + * 00/06/18 eranian Support for stacked physical calls + */ + +/* + * Note that some of these calls use a static-register only calling + * convention which has nothing to do with the regular calling + * convention. 
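+ * + * As an illustrative sketch (not part of the original header; `min_ver' and `cur_ver' are assumed names), a typical caller goes through the PAL_CALL() wrapper defined further below: + * + * struct ia64_pal_retval iprv; + * PAL_CALL(iprv, PAL_VERSION, 0, 0, 0); + * if (iprv.status == PAL_STATUS_SUCCESS) + * min_ver = iprv.v0, cur_ver = iprv.v1; + * + * which saves and restores the scratch fp registers around the static call.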
*/ +#define PAL_CACHE_FLUSH 1 /* flush i/d cache */ +#define PAL_CACHE_INFO 2 /* get detailed i/d cache info */ +#define PAL_CACHE_INIT 3 /* initialize i/d cache */ +#define PAL_CACHE_SUMMARY 4 /* get summary of cache hierarchy */ +#define PAL_MEM_ATTRIB 5 /* list supported memory attributes */ +#define PAL_PTCE_INFO 6 /* purge TLB info */ +#define PAL_VM_INFO 7 /* return supported virtual memory features */ +#define PAL_VM_SUMMARY 8 /* return summary on supported vm features */ +#define PAL_BUS_GET_FEATURES 9 /* return processor bus interface features settings */ +#define PAL_BUS_SET_FEATURES 10 /* set processor bus features */ +#define PAL_DEBUG_INFO 11 /* get number of debug registers */ +#define PAL_FIXED_ADDR 12 /* get fixed component of processor's directed address */ +#define PAL_FREQ_BASE 13 /* base frequency of the platform */ +#define PAL_FREQ_RATIOS 14 /* ratio of processor, bus and ITC frequency */ +#define PAL_PERF_MON_INFO 15 /* return performance monitor info */ +#define PAL_PLATFORM_ADDR 16 /* set processor interrupt block and IO port space addr */ +#define PAL_PROC_GET_FEATURES 17 /* get configurable processor features & settings */ +#define PAL_PROC_SET_FEATURES 18 /* enable/disable configurable processor features */ +#define PAL_RSE_INFO 19 /* return rse information */ +#define PAL_VERSION 20 /* return version of PAL code */ +#define PAL_MC_CLEAR_LOG 21 /* clear all processor log info */ +#define PAL_MC_DRAIN 22 /* drain operations which could result in an MCA */ +#define PAL_MC_EXPECTED 23 /* set/reset expected MCA indicator */ +#define PAL_MC_DYNAMIC_STATE 24 /* get processor dynamic state */ +#define PAL_MC_ERROR_INFO 25 /* get processor MCA info and static state */ +#define PAL_MC_RESUME 26 /* Return to interrupted process */ +#define PAL_MC_REGISTER_MEM 27 /* Register memory for PAL to use during MCAs and inits */ +#define PAL_HALT 28 /* enter the low power HALT state */ +#define PAL_HALT_LIGHT 29 /* enter the low power light halt state */ +#define PAL_COPY_INFO 30 /* returns info needed to relocate PAL */ +#define PAL_CACHE_LINE_INIT 31 /* init tags & data of cache line */ +#define PAL_PMI_ENTRYPOINT 32 /* register PMI memory entry points with the processor */ +#define PAL_ENTER_IA_32_ENV 33 /* enter IA-32 system environment */ +#define PAL_VM_PAGE_SIZE 34 /* return vm TC and page walker page sizes */ + +#define PAL_MEM_FOR_TEST 37 /* get amount of memory needed for late processor test */ +#define PAL_CACHE_PROT_INFO 38 /* get i/d cache protection info */ +#define PAL_REGISTER_INFO 39 /* return AR and CR register information */ +#define PAL_SHUTDOWN 40 /* enter processor shutdown state */ +#define PAL_PREFETCH_VISIBILITY 41 /* Make Processor Prefetches Visible */ + +#define PAL_COPY_PAL 256 /* relocate PAL procedures and PAL PMI */ +#define PAL_HALT_INFO 257 /* return the low power capabilities of processor */ +#define PAL_TEST_PROC 258 /* perform late processor self-test */ +#define PAL_CACHE_READ 259 /* read tag & data of cacheline for diagnostic testing */ +#define PAL_CACHE_WRITE 260 /* write tag & data of cacheline for diagnostic testing */ +#define PAL_VM_TR_READ 261 /* read contents of translation register */ + +#ifndef __ASSEMBLY__ + +#include <linux/types.h> +#include <asm/fpu.h> + +/* + * Data types needed to pass information into PAL procedures and + * interpret information returned by them.
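+ * + * For example (an illustrative sketch; `base_freq' and `itc_freq' are assumed names), PAL_FREQ_RATIOS returns struct pal_freq_ratio values from which the ITC frequency can be derived as + * + * itc_freq = base_freq * itc_ratio.num / itc_ratio.den; + * + * with base_freq obtained via PAL_FREQ_BASE (or from SAL on platforms where PAL does not supply it).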
+ */ + +/* Return status from the PAL procedure */ +typedef s64 pal_status_t; + +#define PAL_STATUS_SUCCESS 0 /* No error */ +#define PAL_STATUS_UNIMPLEMENTED (-1) /* Unimplemented procedure */ +#define PAL_STATUS_EINVAL (-2) /* Invalid argument */ +#define PAL_STATUS_ERROR (-3) /* Error */ +#define PAL_STATUS_CACHE_INIT_FAIL (-4) /* Could not initialize the + * specified level and type of + * cache without sideeffects + * and "restrict" was 1 + */ + +/* Processor cache level in the heirarchy */ +typedef u64 pal_cache_level_t; +#define PAL_CACHE_LEVEL_L0 0 /* L0 */ +#define PAL_CACHE_LEVEL_L1 1 /* L1 */ +#define PAL_CACHE_LEVEL_L2 2 /* L2 */ + + +/* Processor cache type at a particular level in the heirarchy */ + +typedef u64 pal_cache_type_t; +#define PAL_CACHE_TYPE_INSTRUCTION 1 /* Instruction cache */ +#define PAL_CACHE_TYPE_DATA 2 /* Data or unified cache */ +#define PAL_CACHE_TYPE_INSTRUCTION_DATA 3 /* Both Data & Instruction */ + + +#define PAL_CACHE_FLUSH_INVALIDATE 1 /* Invalidate clean lines */ +#define PAL_CACHE_FLUSH_CHK_INTRS 2 /* check for interrupts/mc while flushing */ + +/* Processor cache line size in bytes */ +typedef int pal_cache_line_size_t; + +/* Processor cache line state */ +typedef u64 pal_cache_line_state_t; +#define PAL_CACHE_LINE_STATE_INVALID 0 /* Invalid */ +#define PAL_CACHE_LINE_STATE_SHARED 1 /* Shared */ +#define PAL_CACHE_LINE_STATE_EXCLUSIVE 2 /* Exclusive */ +#define PAL_CACHE_LINE_STATE_MODIFIED 3 /* Modified */ + +typedef struct pal_freq_ratio { + u64 den : 32, num : 32; /* numerator & denominator */ +} itc_ratio, proc_ratio; + +typedef union pal_cache_config_info_1_s { + struct { + u64 u : 1, /* 0 Unified cache ? */ + at : 2, /* 2-1 Cache mem attr*/ + reserved : 5, /* 7-3 Reserved */ + associativity : 8, /* 16-8 Associativity*/ + line_size : 8, /* 23-17 Line size */ + stride : 8, /* 31-24 Stride */ + store_latency : 8, /*39-32 Store latency*/ + load_latency : 8, /* 47-40 Load latency*/ + store_hints : 8, /* 55-48 Store hints*/ + load_hints : 8; /* 63-56 Load hints */ + } pcci1_bits; + u64 pcci1_data; +} pal_cache_config_info_1_t; + +typedef union pal_cache_config_info_2_s { + struct { + u64 cache_size : 32, /*cache size in bytes*/ + + + alias_boundary : 8, /* 39-32 aliased addr + * separation for max + * performance. 
+ */ + tag_ls_bit : 8, /* 47-40 LSb of addr*/ + tag_ms_bit : 8, /* 55-48 MSb of addr*/ + reserved : 8; /* 63-56 Reserved */ + } pcci2_bits; + u64 pcci2_data; +} pal_cache_config_info_2_t; + + +typedef struct pal_cache_config_info_s { + pal_status_t pcci_status; + pal_cache_config_info_1_t pcci_info_1; + pal_cache_config_info_2_t pcci_info_2; + u64 pcci_reserved; +} pal_cache_config_info_t; + +#define pcci_ld_hints pcci_info_1.pcci1_bits.load_hints +#define pcci_st_hints pcci_info_1.pcci1_bits.store_hints +#define pcci_ld_latency pcci_info_1.pcci1_bits.load_latency +#define pcci_st_latency pcci_info_1.pcci1_bits.store_latency +#define pcci_stride pcci_info_1.pcci1_bits.stride +#define pcci_line_size pcci_info_1.pcci1_bits.line_size +#define pcci_assoc pcci_info_1.pcci1_bits.associativity +#define pcci_cache_attr pcci_info_1.pcci1_bits.at +#define pcci_unified pcci_info_1.pcci1_bits.u +#define pcci_tag_msb pcci_info_2.pcci2_bits.tag_ms_bit +#define pcci_tag_lsb pcci_info_2.pcci2_bits.tag_ls_bit +#define pcci_alias_boundary pcci_info_2.pcci2_bits.alias_boundary +#define pcci_cache_size pcci_info_2.pcci2_bits.cache_size + + + +/* Possible values for cache attributes */ + +#define PAL_CACHE_ATTR_WT 0 /* Write through cache */ +#define PAL_CACHE_ATTR_WB 1 /* Write back cache */ +#define PAL_CACHE_ATTR_WT_OR_WB 2 /* Either write thru or write + * back depending on TLB + * memory attributes + */ + + +/* Possible values for cache hints */ + +#define PAL_CACHE_HINT_TEMP_1 0 /* Temporal level 1 */ +#define PAL_CACHE_HINT_NTEMP_1 1 /* Non-temporal level 1 */ +#define PAL_CACHE_HINT_NTEMP_ALL 3 /* Non-temporal all levels */ + +/* Processor cache protection information */ +typedef union pal_cache_protection_element_u { + u32 pcpi_data; + struct { + u32 data_bits : 8, /* # data bits covered by + * each unit of protection + */ + + tagprot_lsb : 6, /* Least -do- */ + tagprot_msb : 6, /* Most Sig. tag address + * bit that this + * protection covers. + */ + prot_bits : 6, /* # of protection bits */ + method : 4, /* Protection method */ + t_d : 2; /* Indicates which part + * of the cache this + * protection encoding + * applies. 
+ */ + } pcp_info; +} pal_cache_protection_element_t; + +#define pcpi_cache_prot_part pcp_info.t_d +#define pcpi_prot_method pcp_info.method +#define pcpi_prot_bits pcp_info.prot_bits +#define pcpi_tagprot_msb pcp_info.tagprot_msb +#define pcpi_tagprot_lsb pcp_info.tagprot_lsb +#define pcpi_data_bits pcp_info.data_bits + +/* Processor cache part encodings */ +#define PAL_CACHE_PROT_PART_DATA 0 /* Data protection */ +#define PAL_CACHE_PROT_PART_TAG 1 /* Tag protection */ +#define PAL_CACHE_PROT_PART_TAG_DATA 2 /* Tag+data protection (tag is + * more significant ) + */ +#define PAL_CACHE_PROT_PART_DATA_TAG 3 /* Data+tag protection (data is + * more significant ) + */ +#define PAL_CACHE_PROT_PART_MAX 6 + + +typedef struct pal_cache_protection_info_s { + pal_status_t pcpi_status; + pal_cache_protection_element_t pcp_info[PAL_CACHE_PROT_PART_MAX]; +} pal_cache_protection_info_t; + + +/* Processor cache protection method encodings */ +#define PAL_CACHE_PROT_METHOD_NONE 0 /* No protection */ +#define PAL_CACHE_PROT_METHOD_ODD_PARITY 1 /* Odd parity */ +#define PAL_CACHE_PROT_METHOD_EVEN_PARITY 2 /* Even parity */ +#define PAL_CACHE_PROT_METHOD_ECC 3 /* ECC protection */ + + +/* Processor cache line identification in the heirarchy */ +typedef union pal_cache_line_id_u { + u64 pclid_data; + struct { + u64 cache_type : 8, /* 7-0 cache type */ + level : 8, /* 15-8 level of the + * cache in the + * heirarchy. + */ + way : 8, /* 23-16 way in the set + */ + part : 8, /* 31-24 part of the + * cache + */ + reserved : 32; /* 63-32 is reserved*/ + } pclid_info_read; + struct { + u64 cache_type : 8, /* 7-0 cache type */ + level : 8, /* 15-8 level of the + * cache in the + * heirarchy. + */ + way : 8, /* 23-16 way in the set + */ + part : 8, /* 31-24 part of the + * cache + */ + mesi : 8, /* 39-32 cache line + * state + */ + start : 8, /* 47-40 lsb of data to + * invert + */ + length : 8, /* 55-48 #bits to + * invert + */ + trigger : 8; /* 63-56 Trigger error + * by doing a load + * after the write + */ + + } pclid_info_write; +} pal_cache_line_id_u_t; + +#define pclid_read_part pclid_info_read.part +#define pclid_read_way pclid_info_read.way +#define pclid_read_level pclid_info_read.level +#define pclid_read_cache_type pclid_info_read.cache_type + +#define pclid_write_trigger pclid_info_write.trigger +#define pclid_write_length pclid_info_write.length +#define pclid_write_start pclid_info_write.start +#define pclid_write_mesi pclid_info_write.mesi +#define pclid_write_part pclid_info_write.part +#define pclid_write_way pclid_info_write.way +#define pclid_write_level pclid_info_write.level +#define pclid_write_cache_type pclid_info_write.cache_type + +/* Processor cache line part encodings */ +#define PAL_CACHE_LINE_ID_PART_DATA 0 /* Data */ +#define PAL_CACHE_LINE_ID_PART_TAG 1 /* Tag */ +#define PAL_CACHE_LINE_ID_PART_DATA_PROT 2 /* Data protection */ +#define PAL_CACHE_LINE_ID_PART_TAG_PROT 3 /* Tag protection */ +#define PAL_CACHE_LINE_ID_PART_DATA_TAG_PROT 4 /* Data+tag + * protection + */ +typedef struct pal_cache_line_info_s { + pal_status_t pcli_status; /* Return status of the read cache line + * info call. + */ + u64 pcli_data; /* 64-bit data, tag, protection bits .. 
*/ + u64 pcli_data_len; /* data length in bits */ + pal_cache_line_state_t pcli_cache_line_state; /* mesi state */ + +} pal_cache_line_info_t; + + +/* Machine Check related crap */ + +/* Pending event status bits */ +typedef u64 pal_mc_pending_events_t; + +#define PAL_MC_PENDING_MCA (1 << 0) +#define PAL_MC_PENDING_INIT (1 << 1) + +/* Error information type */ +typedef u64 pal_mc_info_index_t; + +#define PAL_MC_INFO_PROCESSOR 0 /* Processor */ +#define PAL_MC_INFO_CACHE_CHECK 1 /* Cache check */ +#define PAL_MC_INFO_TLB_CHECK 2 /* Tlb check */ +#define PAL_MC_INFO_BUS_CHECK 3 /* Bus check */ +#define PAL_MC_INFO_REQ_ADDR 4 /* Requestor address */ +#define PAL_MC_INFO_RESP_ADDR 5 /* Responder address */ +#define PAL_MC_INFO_TARGET_ADDR 6 /* Target address */ +#define PAL_MC_INFO_IMPL_DEP 7 /* Implementation + * dependent + */ + + +typedef struct pal_process_state_info_s { + u64 reserved1 : 2, + rz : 1, /* PAL_CHECK processor + * rendezvous + * successful. + */ + + ra : 1, /* PAL_CHECK attempted + * a rendezvous. + */ + me : 1, /* Distinct multiple + * errors occurred + */ + + mn : 1, /* Min. state save + * area has been + * registered with PAL + */ + + sy : 1, /* Storage integrity + * synched + */ + + + co : 1, /* Continuable */ + ci : 1, /* MC isolated */ + us : 1, /* Uncontained storage + * damage. + */ + + + hd : 1, /* Non-essential hw + * lost (no loss of + * functionality) + * causing the + * processor to run in + * degraded mode. + */ + + tl : 1, /* 1 => MC occurred + * after an instr was + * executed but before + * the trap that + * resulted from instr + * execution was + * generated. + * (Trap Lost ) + */ + mi : 1, /* More information available + * call PAL_MC_ERROR_INFO + */ + pi : 1, /* Precise instruction pointer */ + pm : 1, /* Precise min-state save area */ + + dy : 1, /* Processor dynamic + * state valid + */ + + + in : 1, /* 0 = MC, 1 = INIT */ + rs : 1, /* RSE valid */ + cm : 1, /* MC corrected */ + ex : 1, /* MC is expected */ + cr : 1, /* Control regs valid*/ + pc : 1, /* Perf cntrs valid */ + dr : 1, /* Debug regs valid */ + tr : 1, /* Translation regs + * valid + */ + rr : 1, /* Region regs valid */ + ar : 1, /* App regs valid */ + br : 1, /* Branch regs valid */ + pr : 1, /* Predicate registers + * valid + */ + + fp : 1, /* fp registers valid*/ + b1 : 1, /* Preserved bank one + * general registers + * are valid + */ + b0 : 1, /* Preserved bank zero + * general registers + * are valid + */ + gr : 1, /* General registers + * are valid + * (excl. banked regs) + */ + dsize : 16, /* size of dynamic + * state returned + * by the processor + */ + + reserved2 : 11, + cc : 1, /* Cache check */ + tc : 1, /* TLB check */ + bc : 1, /* Bus check */ + rc : 1, /* Register file check */ + uc : 1; /* Uarch check */ + +} pal_processor_state_info_t; + +typedef struct pal_cache_check_info_s { + u64 op : 4, /* Type of cache + * operation that + * caused the machine + * check. 
+ */ + level : 2, /* Cache level */ + reserved1 : 2, + dl : 1, /* Failure in data part + * of cache line + */ + tl : 1, /* Failure in tag part + * of cache line + */ + dc : 1, /* Failure in dcache */ + ic : 1, /* Failure in icache */ + mesi : 3, /* Cache line state */ + mv : 1, /* mesi valid */ + way : 5, /* Way in which the + * error occurred + */ + wiv : 1, /* Way field valid */ + reserved2 : 10, + + index : 20, /* Cache line index */ + reserved3 : 2, + + is : 1, /* instruction set (1 == ia32) */ + iv : 1, /* instruction set field valid */ + pl : 2, /* privilege level */ + pv : 1, /* privilege level field valid */ + mcc : 1, /* Machine check corrected */ + tv : 1, /* Target address + * structure is valid + */ + rq : 1, /* Requester identifier + * structure is valid + */ + rp : 1, /* Responder identifier + * structure is valid + */ + pi : 1; /* Precise instruction pointer + * structure is valid + */ +} pal_cache_check_info_t; + +typedef struct pal_tlb_check_info_s { + + u64 tr_slot : 8, /* Slot# of TR where + * error occurred + */ + trv : 1, /* tr_slot field is valid */ + reserved1 : 1, + level : 2, /* TLB level where failure occurred */ + reserved2 : 4, + dtr : 1, /* Fail in data TR */ + itr : 1, /* Fail in inst TR */ + dtc : 1, /* Fail in data TC */ + itc : 1, /* Fail in inst. TC */ + op : 4, /* Cache operation */ + reserved3 : 30, + + is : 1, /* instruction set (1 == ia32) */ + iv : 1, /* instruction set field valid */ + pl : 2, /* privilege level */ + pv : 1, /* privilege level field valid */ + mcc : 1, /* Machine check corrected */ + tv : 1, /* Target address + * structure is valid + */ + rq : 1, /* Requester identifier + * structure is valid + */ + rp : 1, /* Responder identifier + * structure is valid + */ + pi : 1; /* Precise instruction pointer + * structure is valid + */ +} pal_tlb_check_info_t; + +typedef struct pal_bus_check_info_s { + u64 size : 5, /* Xaction size */ + ib : 1, /* Internal bus error */ + eb : 1, /* External bus error */ + cc : 1, /* Error occurred + * during cache-cache + * transfer. + */ + type : 8, /* Bus xaction type*/ + sev : 5, /* Bus error severity*/ + hier : 2, /* Bus hierarchy level */ + reserved1 : 1, + bsi : 8, /* Bus error status + * info + */ + reserved2 : 22, + + is : 1, /* instruction set (1 == ia32) */ + iv : 1, /* instruction set field valid */ + pl : 2, /* privilege level */ + pv : 1, /* privilege level field valid */ + mcc : 1, /* Machine check corrected */ + tv : 1, /* Target address + * structure is valid + */ + rq : 1, /* Requester identifier + * structure is valid + */ + rp : 1, /* Responder identifier + * structure is valid + */ + pi : 1; /* Precise instruction pointer + * structure is valid + */ +} pal_bus_check_info_t; + +typedef struct pal_reg_file_check_info_s { + u64 id : 4, /* Register file identifier */ + op : 4, /* Type of register + * operation that + * caused the machine + * check. + */ + reg_num : 7, /* Register number */ + rnv : 1, /* reg_num valid */ + reserved2 : 38, + + is : 1, /* instruction set (1 == ia32) */ + iv : 1, /* instruction set field valid */ + pl : 2, /* privilege level */ + pv : 1, /* privilege level field valid */ + mcc : 1, /* Machine check corrected */ + reserved3 : 3, + pi : 1; /* Precise instruction pointer + * structure is valid + */ +} pal_reg_file_check_info_t; + +typedef struct pal_uarch_check_info_s { + u64 sid : 5, /* Structure identification */ + level : 3, /* Level of failure */ + array_id : 4, /* Array identification */ + op : 4, /* Type of + * operation that + * caused the machine + * check. 
+ */ + way : 6, /* Way of structure */ + wv : 1, /* way valid */ + xv : 1, /* index valid */ + reserved1 : 8, + index : 8, /* Index or set of the uarch + * structure that failed. + */ + reserved2 : 24, + + is : 1, /* instruction set (1 == ia32) */ + iv : 1, /* instruction set field valid */ + pl : 2, /* privilege level */ + pv : 1, /* privilege level field valid */ + mcc : 1, /* Machine check corrected */ + tv : 1, /* Target address + * structure is valid + */ + rq : 1, /* Requester identifier + * structure is valid + */ + rp : 1, /* Responder identifier + * structure is valid + */ + pi : 1; /* Precise instruction pointer + * structure is valid + */ +} pal_uarch_check_info_t; + +typedef union pal_mc_error_info_u { + u64 pmei_data; + pal_processor_state_info_t pme_processor; + pal_cache_check_info_t pme_cache; + pal_tlb_check_info_t pme_tlb; + pal_bus_check_info_t pme_bus; + pal_reg_file_check_info_t pme_reg_file; + pal_uarch_check_info_t pme_uarch; +} pal_mc_error_info_t; + +#define pmci_proc_unknown_check pme_processor.uc +#define pmci_proc_bus_check pme_processor.bc +#define pmci_proc_tlb_check pme_processor.tc +#define pmci_proc_cache_check pme_processor.cc +#define pmci_proc_dynamic_state_size pme_processor.dsize +#define pmci_proc_gpr_valid pme_processor.gr +#define pmci_proc_preserved_bank0_gpr_valid pme_processor.b0 +#define pmci_proc_preserved_bank1_gpr_valid pme_processor.b1 +#define pmci_proc_fp_valid pme_processor.fp +#define pmci_proc_predicate_regs_valid pme_processor.pr +#define pmci_proc_branch_regs_valid pme_processor.br +#define pmci_proc_app_regs_valid pme_processor.ar +#define pmci_proc_region_regs_valid pme_processor.rr +#define pmci_proc_translation_regs_valid pme_processor.tr +#define pmci_proc_debug_regs_valid pme_processor.dr +#define pmci_proc_perf_counters_valid pme_processor.pc +#define pmci_proc_control_regs_valid pme_processor.cr +#define pmci_proc_machine_check_expected pme_processor.ex +#define pmci_proc_machine_check_corrected pme_processor.cm +#define pmci_proc_rse_valid pme_processor.rs +#define pmci_proc_machine_check_or_init pme_processor.in +#define pmci_proc_dynamic_state_valid pme_processor.dy +#define pmci_proc_operation pme_processor.op +#define pmci_proc_trap_lost pme_processor.tl +#define pmci_proc_hardware_damage pme_processor.hd +#define pmci_proc_uncontained_storage_damage pme_processor.us +#define pmci_proc_machine_check_isolated pme_processor.ci +#define pmci_proc_continuable pme_processor.co +#define pmci_proc_storage_intergrity_synced pme_processor.sy +#define pmci_proc_min_state_save_area_regd pme_processor.mn +#define pmci_proc_distinct_multiple_errors pme_processor.me +#define pmci_proc_pal_attempted_rendezvous pme_processor.ra +#define pmci_proc_pal_rendezvous_complete pme_processor.rz + + +#define pmci_cache_level pme_cache.level +#define pmci_cache_line_state pme_cache.mesi +#define pmci_cache_line_state_valid pme_cache.mv +#define pmci_cache_line_index pme_cache.index +#define pmci_cache_instr_cache_fail pme_cache.ic +#define pmci_cache_data_cache_fail pme_cache.dc +#define pmci_cache_line_tag_fail pme_cache.tl +#define pmci_cache_line_data_fail pme_cache.dl +#define pmci_cache_operation pme_cache.op +#define pmci_cache_way_valid pme_cache.wv +#define pmci_cache_target_address_valid pme_cache.tv +#define pmci_cache_way pme_cache.way +#define pmci_cache_mc pme_cache.mc + +#define pmci_tlb_instr_translation_cache_fail pme_tlb.itc +#define pmci_tlb_data_translation_cache_fail pme_tlb.dtc +#define pmci_tlb_instr_translation_reg_fail 
pme_tlb.itr +#define pmci_tlb_data_translation_reg_fail pme_tlb.dtr +#define pmci_tlb_translation_reg_slot pme_tlb.tr_slot +#define pmci_tlb_mc pme_tlb.mc + +#define pmci_bus_status_info pme_bus.bsi +#define pmci_bus_req_address_valid pme_bus.rq +#define pmci_bus_resp_address_valid pme_bus.rp +#define pmci_bus_target_address_valid pme_bus.tv +#define pmci_bus_error_severity pme_bus.sev +#define pmci_bus_transaction_type pme_bus.type +#define pmci_bus_cache_cache_transfer pme_bus.cc +#define pmci_bus_transaction_size pme_bus.size +#define pmci_bus_internal_error pme_bus.ib +#define pmci_bus_external_error pme_bus.eb +#define pmci_bus_mc pme_bus.mc + +/* + * NOTE: this min_state_save area struct only includes the 1KB + * architectural state save area. The other 3 KB is scratch space + * for PAL. + */ + +typedef struct pal_min_state_area_s { + u64 pmsa_nat_bits; /* nat bits for saved GRs */ + u64 pmsa_gr[15]; /* GR1 - GR15 */ + u64 pmsa_bank0_gr[16]; /* GR16 - GR31 */ + u64 pmsa_bank1_gr[16]; /* GR16 - GR31 */ + u64 pmsa_pr; /* predicate registers */ + u64 pmsa_br0; /* branch register 0 */ + u64 pmsa_rsc; /* ar.rsc */ + u64 pmsa_iip; /* cr.iip */ + u64 pmsa_ipsr; /* cr.ipsr */ + u64 pmsa_ifs; /* cr.ifs */ + u64 pmsa_xip; /* previous iip */ + u64 pmsa_xpsr; /* previous psr */ + u64 pmsa_xfs; /* previous ifs */ + u64 pmsa_br1; /* branch register 1 */ + u64 pmsa_reserved[70]; /* pal_min_state_area should total to 1KB */ +} pal_min_state_area_t; + + +struct ia64_pal_retval { + /* + * A zero status value indicates call completed without error. + * A negative status value indicates reason of call failure. + * A positive status value indicates success but an + * informational value should be printed (e.g., "reboot for + * change to take effect"). + */ + s64 status; + u64 v0; + u64 v1; + u64 v2; +}; + +/* + * Note: Currently unused PAL arguments are generally labeled + * "reserved" so the value specified in the PAL documentation + * (generally 0) MUST be passed. Reserved parameters are not optional + * parameters. 
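+ * + * E.g. (an editorial sketch), a procedure that takes no real arguments still passes explicit zeros in every reserved position: + * + * struct ia64_pal_retval iprv; + * PAL_CALL(iprv, PAL_MC_DRAIN, 0, 0, 0);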
+ */ +extern struct ia64_pal_retval ia64_pal_call_static (u64, u64, u64, u64, u64); +extern struct ia64_pal_retval ia64_pal_call_stacked (u64, u64, u64, u64); +extern struct ia64_pal_retval ia64_pal_call_phys_static (u64, u64, u64, u64); +extern struct ia64_pal_retval ia64_pal_call_phys_stacked (u64, u64, u64, u64); +extern void ia64_save_scratch_fpregs (struct ia64_fpreg *); +extern void ia64_load_scratch_fpregs (struct ia64_fpreg *); + +#define PAL_CALL(iprv,a0,a1,a2,a3) do { \ + struct ia64_fpreg fr[6]; \ + ia64_save_scratch_fpregs(fr); \ + iprv = ia64_pal_call_static(a0, a1, a2, a3, 0); \ + ia64_load_scratch_fpregs(fr); \ +} while (0) + +#define PAL_CALL_IC_OFF(iprv,a0,a1,a2,a3) do { \ + struct ia64_fpreg fr[6]; \ + ia64_save_scratch_fpregs(fr); \ + iprv = ia64_pal_call_static(a0, a1, a2, a3, 1); \ + ia64_load_scratch_fpregs(fr); \ +} while (0) + +#define PAL_CALL_STK(iprv,a0,a1,a2,a3) do { \ + struct ia64_fpreg fr[6]; \ + ia64_save_scratch_fpregs(fr); \ + iprv = ia64_pal_call_stacked(a0, a1, a2, a3); \ + ia64_load_scratch_fpregs(fr); \ +} while (0) + +#define PAL_CALL_PHYS(iprv,a0,a1,a2,a3) do { \ + struct ia64_fpreg fr[6]; \ + ia64_save_scratch_fpregs(fr); \ + iprv = ia64_pal_call_phys_static(a0, a1, a2, a3); \ + ia64_load_scratch_fpregs(fr); \ +} while (0) + +#define PAL_CALL_PHYS_STK(iprv,a0,a1,a2,a3) do { \ + struct ia64_fpreg fr[6]; \ + ia64_save_scratch_fpregs(fr); \ + iprv = ia64_pal_call_phys_stacked(a0, a1, a2, a3); \ + ia64_load_scratch_fpregs(fr); \ +} while (0) + +typedef int (*ia64_pal_handler) (u64, ...); +extern ia64_pal_handler ia64_pal; +extern void ia64_pal_handler_init (void *); + +extern ia64_pal_handler ia64_pal; + +extern pal_cache_config_info_t l0d_cache_config_info; +extern pal_cache_config_info_t l0i_cache_config_info; +extern pal_cache_config_info_t l1_cache_config_info; +extern pal_cache_config_info_t l2_cache_config_info; + +extern pal_cache_protection_info_t l0d_cache_protection_info; +extern pal_cache_protection_info_t l0i_cache_protection_info; +extern pal_cache_protection_info_t l1_cache_protection_info; +extern pal_cache_protection_info_t l2_cache_protection_info; + +extern pal_cache_config_info_t pal_cache_config_info_get(pal_cache_level_t, + pal_cache_type_t); + +extern pal_cache_protection_info_t pal_cache_protection_info_get(pal_cache_level_t, + pal_cache_type_t); + + +extern void pal_error(int); + + +/* Useful wrappers for the current list of pal procedures */ + +typedef union pal_bus_features_u { + u64 pal_bus_features_val; + struct { + u64 pbf_reserved1 : 29; + u64 pbf_req_bus_parking : 1; + u64 pbf_bus_lock_mask : 1; + u64 pbf_enable_half_xfer_rate : 1; + u64 pbf_reserved2 : 22; + u64 pbf_disable_xaction_queueing : 1; + u64 pbf_disable_resp_err_check : 1; + u64 pbf_disable_berr_check : 1; + u64 pbf_disable_bus_req_internal_err_signal : 1; + u64 pbf_disable_bus_req_berr_signal : 1; + u64 pbf_disable_bus_init_event_check : 1; + u64 pbf_disable_bus_init_event_signal : 1; + u64 pbf_disable_bus_addr_err_check : 1; + u64 pbf_disable_bus_addr_err_signal : 1; + u64 pbf_disable_bus_data_err_check : 1; + } pal_bus_features_s; +} pal_bus_features_u_t; + +extern void pal_bus_features_print (u64); + +/* Provide information about configurable processor bus features */ +static inline s64 +ia64_pal_bus_get_features (pal_bus_features_u_t *features_avail, + pal_bus_features_u_t *features_status, + pal_bus_features_u_t *features_control) +{ + struct ia64_pal_retval iprv; + PAL_CALL_PHYS(iprv, PAL_BUS_GET_FEATURES, 0, 0, 0); + if (features_avail) + 
features_avail->pal_bus_features_val = iprv.v0; + if (features_status) + features_status->pal_bus_features_val = iprv.v1; + if (features_control) + features_control->pal_bus_features_val = iprv.v2; + return iprv.status; +} + +/* Enables/disables specific processor bus features */ +static inline s64 +ia64_pal_bus_set_features (pal_bus_features_u_t feature_select) +{ + struct ia64_pal_retval iprv; + PAL_CALL_PHYS(iprv, PAL_BUS_SET_FEATURES, feature_select.pal_bus_features_val, 0, 0); + return iprv.status; +} + +/* Get detailed cache information */ +static inline s64 +ia64_pal_cache_config_info (u64 cache_level, u64 cache_type, pal_cache_config_info_t *conf) +{ + struct ia64_pal_retval iprv; + + PAL_CALL(iprv, PAL_CACHE_INFO, cache_level, cache_type, 0); + + if (iprv.status == 0) { + conf->pcci_status = iprv.status; + conf->pcci_info_1.pcci1_data = iprv.v0; + conf->pcci_info_2.pcci2_data = iprv.v1; + conf->pcci_reserved = iprv.v2; + } + return iprv.status; +} + +/* Get detailed cache protection information */ +static inline s64 +ia64_pal_cache_prot_info (u64 cache_level, u64 cache_type, pal_cache_protection_info_t *prot) +{ + struct ia64_pal_retval iprv; + + PAL_CALL(iprv, PAL_CACHE_PROT_INFO, cache_level, cache_type, 0); + + if (iprv.status == 0) { + prot->pcpi_status = iprv.status; + prot->pcp_info[0].pcpi_data = iprv.v0 & 0xffffffff; + prot->pcp_info[1].pcpi_data = iprv.v0 >> 32; + prot->pcp_info[2].pcpi_data = iprv.v1 & 0xffffffff; + prot->pcp_info[3].pcpi_data = iprv.v1 >> 32; + prot->pcp_info[4].pcpi_data = iprv.v2 & 0xffffffff; + prot->pcp_info[5].pcpi_data = iprv.v2 >> 32; + } + return iprv.status; +} + +/* + * Flush the processor instruction or data caches. *PROGRESS must be + * initialized to zero before calling this for the first time. + */ +static inline s64 +ia64_pal_cache_flush (u64 cache_type, u64 invalidate, u64 *progress, u64 *vector) +{ + struct ia64_pal_retval iprv; + PAL_CALL_IC_OFF(iprv, PAL_CACHE_FLUSH, cache_type, invalidate, *progress); + if (vector) + *vector = iprv.v0; + *progress = iprv.v1; + return iprv.status; +} + + +/* Initialize the processor controlled caches */ +static inline s64 +ia64_pal_cache_init (u64 level, u64 cache_type, u64 rest) +{ + struct ia64_pal_retval iprv; + PAL_CALL(iprv, PAL_CACHE_INIT, level, cache_type, rest); + return iprv.status; +} + +/* Initialize the tags and data of a data or unified cache line of + * processor controlled cache to known values without the availability + * of backing memory.
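 + * + * Illustrative sketch (an editor's example, not from the original + * source), assuming phys_addr names a suitably aligned physical + * address: + * + *	s64 status = ia64_pal_cache_line_init(phys_addr, 0UL); + *	if (status != 0) + *		printk("PAL_CACHE_LINE_INIT failed: %ld\n", status);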
+ */ +static inline s64 +ia64_pal_cache_line_init (u64 physical_addr, u64 data_value) +{ + struct ia64_pal_retval iprv; + PAL_CALL(iprv, PAL_CACHE_LINE_INIT, physical_addr, data_value, 0); + return iprv.status; +} + + +/* Read the data and tag of a processor controlled cache line for diags */ +static inline s64 +ia64_pal_cache_read (pal_cache_line_id_u_t line_id, u64 physical_addr) +{ + struct ia64_pal_retval iprv; + PAL_CALL(iprv, PAL_CACHE_READ, line_id.pclid_data, physical_addr, 0); + return iprv.status; +} + +/* Return summary information about the hierarchy of caches controlled by the processor */ +static inline s64 +ia64_pal_cache_summary (u64 *cache_levels, u64 *unique_caches) +{ + struct ia64_pal_retval iprv; + PAL_CALL(iprv, PAL_CACHE_SUMMARY, 0, 0, 0); + if (cache_levels) + *cache_levels = iprv.v0; + if (unique_caches) + *unique_caches = iprv.v1; + return iprv.status; +} + +/* Write the data and tag of a processor-controlled cache line for diags */ +static inline s64 +ia64_pal_cache_write (pal_cache_line_id_u_t line_id, u64 physical_addr, u64 data) +{ + struct ia64_pal_retval iprv; + PAL_CALL(iprv, PAL_CACHE_WRITE, line_id.pclid_data, physical_addr, data); + return iprv.status; +} + + +/* Return the parameters needed to copy relocatable PAL procedures from ROM to memory */ +static inline s64 +ia64_pal_copy_info (u64 copy_type, u64 num_procs, u64 num_iopics, + u64 *buffer_size, u64 *buffer_align) +{ + struct ia64_pal_retval iprv; + PAL_CALL(iprv, PAL_COPY_INFO, copy_type, num_procs, num_iopics); + if (buffer_size) + *buffer_size = iprv.v0; + if (buffer_align) + *buffer_align = iprv.v1; + return iprv.status; +} + +/* Copy relocatable PAL procedures from ROM to memory */ +static inline s64 +ia64_pal_copy_pal (u64 target_addr, u64 alloc_size, u64 processor, u64 *pal_proc_offset) +{ + struct ia64_pal_retval iprv; + PAL_CALL(iprv, PAL_COPY_PAL, target_addr, alloc_size, processor); + if (pal_proc_offset) + *pal_proc_offset = iprv.v0; + return iprv.status; +} + +/* Return the number of instruction and data debug register pairs */ +static inline s64 +ia64_pal_debug_info (u64 *inst_regs, u64 *data_regs) +{ + struct ia64_pal_retval iprv; + PAL_CALL(iprv, PAL_DEBUG_INFO, 0, 0, 0); + if (inst_regs) + *inst_regs = iprv.v0; + if (data_regs) + *data_regs = iprv.v1; + + return iprv.status; +} + +#ifdef TBD +/* Switch from IA64-system environment to IA-32 system environment */ +static inline s64 +ia64_pal_enter_ia32_env (ia32_env1, ia32_env2, ia32_env3) +{ + struct ia64_pal_retval iprv; + PAL_CALL(iprv, PAL_ENTER_IA_32_ENV, ia32_env1, ia32_env2, ia32_env3); + return iprv.status; +} +#endif + +/* Get unique geographical address of this processor on its bus */ +static inline s64 +ia64_pal_fixed_addr (u64 *global_unique_addr) +{ + struct ia64_pal_retval iprv; + PAL_CALL(iprv, PAL_FIXED_ADDR, 0, 0, 0); + if (global_unique_addr) + *global_unique_addr = iprv.v0; + return iprv.status; +} + +/* Get base frequency of the platform if generated by the processor */ +static inline s64 +ia64_pal_freq_base (u64 *platform_base_freq) +{ + struct ia64_pal_retval iprv; + PAL_CALL(iprv, PAL_FREQ_BASE, 0, 0, 0); + if (platform_base_freq) + *platform_base_freq = iprv.v0; + return iprv.status; +} + +/* + * Get the ratios for processor frequency, bus frequency and interval timer to + * the base frequency of the platform + */ +static inline s64 +ia64_pal_freq_ratios (struct pal_freq_ratio *proc_ratio, struct pal_freq_ratio *bus_ratio, + struct pal_freq_ratio *itc_ratio) +{ + struct ia64_pal_retval iprv; + PAL_CALL(iprv,
PAL_FREQ_RATIOS, 0, 0, 0); + if (proc_ratio) + *(u64 *)proc_ratio = iprv.v0; + if (bus_ratio) + *(u64 *)bus_ratio = iprv.v1; + if (itc_ratio) + *(u64 *)itc_ratio = iprv.v2; + return iprv.status; +} + +/* Make the processor enter HALT or one of the implementation dependent low + * power states where prefetching and execution are suspended and cache and + * TLB coherency is not maintained. + */ +static inline s64 +ia64_pal_halt (u64 halt_state) +{ + struct ia64_pal_retval iprv; + PAL_CALL(iprv, PAL_HALT, halt_state, 0, 0); + return iprv.status; +} + +typedef union pal_power_mgmt_info_u { + u64 ppmi_data; + struct { + u64 exit_latency : 16, + entry_latency : 16, + power_consumption : 28, + im : 1, + co : 1, + reserved : 2; + } pal_power_mgmt_info_s; +} pal_power_mgmt_info_u_t; + +/* Return information about processor's optional power management capabilities. */ +static inline s64 +ia64_pal_halt_info (pal_power_mgmt_info_u_t *power_buf) +{ + struct ia64_pal_retval iprv; + PAL_CALL_STK(iprv, PAL_HALT_INFO, (unsigned long) power_buf, 0, 0); + return iprv.status; +} + +/* Cause the processor to enter LIGHT HALT state, where prefetching and execution are + * suspended, but cache and TLB coherency is maintained. + */ +static inline s64 +ia64_pal_halt_light (void) +{ + struct ia64_pal_retval iprv; + PAL_CALL(iprv, PAL_HALT_LIGHT, 0, 0, 0); + return iprv.status; +} + +/* Clear all the processor error logging registers and reset the indicator that allows + * the error logging registers to be written. This procedure also checks the pending + * machine check bit and pending INIT bit and reports their states. + */ +static inline s64 +ia64_pal_mc_clear_log (u64 *pending_vector) +{ + struct ia64_pal_retval iprv; + PAL_CALL(iprv, PAL_MC_CLEAR_LOG, 0, 0, 0); + if (pending_vector) + *pending_vector = iprv.v0; + return iprv.status; +} + +/* Ensure that all outstanding transactions in a processor are completed or that any + * MCA due to these outstanding transactions is taken. + */ +static inline s64 +ia64_pal_mc_drain (void) +{ + struct ia64_pal_retval iprv; + PAL_CALL(iprv, PAL_MC_DRAIN, 0, 0, 0); + return iprv.status; +} + +/* Return the machine check dynamic processor state */ +static inline s64 +ia64_pal_mc_dynamic_state (u64 offset, u64 *size, u64 *pds) +{ + struct ia64_pal_retval iprv; + PAL_CALL(iprv, PAL_MC_DYNAMIC_STATE, offset, 0, 0); + if (size) + *size = iprv.v0; + if (pds) + *pds = iprv.v1; + return iprv.status; +} + +/* Return processor machine check information */ +static inline s64 +ia64_pal_mc_error_info (u64 info_index, u64 type_index, u64 *size, u64 *error_info) +{ + struct ia64_pal_retval iprv; + PAL_CALL(iprv, PAL_MC_ERROR_INFO, info_index, type_index, 0); + if (size) + *size = iprv.v0; + if (error_info) + *error_info = iprv.v1; + return iprv.status; +} + +/* Inform PALE_CHECK whether a machine check is expected so that PALE_CHECK will not + * attempt to correct any expected machine checks. + */ +static inline s64 +ia64_pal_mc_expected (u64 expected, u64 *previous) +{ + struct ia64_pal_retval iprv; + PAL_CALL(iprv, PAL_MC_EXPECTED, expected, 0, 0); + if (previous) + *previous = iprv.v0; + return iprv.status; +} + +/* Register a platform dependent location with PAL to which it can save + * minimal processor state in the event of a machine check or initialization + * event.
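 + * + * Illustrative sketch (an editor's example, not from the original + * source), assuming mca_save_area names a buffer reserved for this + * purpose: + * + *	s64 status = ia64_pal_mc_register_mem(__pa(mca_save_area)); + *	if (status != 0) + *		printk("PAL_MC_REGISTER_MEM failed: %ld\n", status);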
+ */ +static inline s64 +ia64_pal_mc_register_mem (u64 physical_addr) +{ + struct ia64_pal_retval iprv; + PAL_CALL(iprv, PAL_MC_REGISTER_MEM, physical_addr, 0, 0); + return iprv.status; +} + +/* Restore minimal architectural processor state, set CMC interrupt if necessary + * and resume execution + */ +static inline s64 +ia64_pal_mc_resume (u64 set_cmci, u64 save_ptr) +{ + struct ia64_pal_retval iprv; + PAL_CALL(iprv, PAL_MC_RESUME, set_cmci, save_ptr, 0); + return iprv.status; +} + +/* Return the memory attributes implemented by the processor */ +static inline s64 +ia64_pal_mem_attrib (u64 *mem_attrib) +{ + struct ia64_pal_retval iprv; + PAL_CALL(iprv, PAL_MEM_ATTRIB, 0, 0, 0); + if (mem_attrib) + *mem_attrib = iprv.v0 & 0xff; + return iprv.status; +} + +/* Return the amount of memory needed for second phase of processor + * self-test and the required alignment of memory. + */ +static inline s64 +ia64_pal_mem_for_test (u64 *bytes_needed, u64 *alignment) +{ + struct ia64_pal_retval iprv; + PAL_CALL(iprv, PAL_MEM_FOR_TEST, 0, 0, 0); + if (bytes_needed) + *bytes_needed = iprv.v0; + if (alignment) + *alignment = iprv.v1; + return iprv.status; +} + +typedef union pal_perf_mon_info_u { + u64 ppmi_data; + struct { + u64 generic : 8, + width : 8, + cycles : 8, + retired : 8, + reserved : 32; + } pal_perf_mon_info_s; +} pal_perf_mon_info_u_t; + +/* Return the performance monitor information about what can be counted + * and how to configure the monitors to count the desired events. + */ +static inline s64 +ia64_pal_perf_mon_info (u64 *pm_buffer, pal_perf_mon_info_u_t *pm_info) +{ + struct ia64_pal_retval iprv; + PAL_CALL(iprv, PAL_PERF_MON_INFO, (unsigned long) pm_buffer, 0, 0); + if (pm_info) + pm_info->ppmi_data = iprv.v0; + return iprv.status; +} + +/* Specifies the physical address of the processor interrupt block + * and I/O port space. + */ +static inline s64 +ia64_pal_platform_addr (u64 type, u64 physical_addr) +{ + struct ia64_pal_retval iprv; + PAL_CALL(iprv, PAL_PLATFORM_ADDR, type, physical_addr, 0); + return iprv.status; +} + +/* Set the SAL PMI entrypoint in memory */ +static inline s64 +ia64_pal_pmi_entrypoint (u64 sal_pmi_entry_addr) +{ + struct ia64_pal_retval iprv; + PAL_CALL(iprv, PAL_PMI_ENTRYPOINT, sal_pmi_entry_addr, 0, 0); + return iprv.status; +} + +struct pal_features_s; +/* Provide information about configurable processor features */ +static inline s64 +ia64_pal_proc_get_features (u64 *features_avail, + u64 *features_status, + u64 *features_control) +{ + struct ia64_pal_retval iprv; + PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, 0, 0); + if (iprv.status == 0) { + *features_avail = iprv.v0; + *features_status = iprv.v1; + *features_control = iprv.v2; + } + return iprv.status; +} + +/* Enable/disable processor dependent features */ +static inline s64 +ia64_pal_proc_set_features (u64 feature_select) +{ + struct ia64_pal_retval iprv; + PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES, feature_select, 0, 0); + return iprv.status; +} + +/* + * Put everything in a struct so we avoid the global offset table whenever + * possible. 
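 + * + * Illustrative sketch (an editor's example, not part of the original + * header) of the architected purge loop this information drives, + * similar in spirit to the kernel's local_flush_tlb_all(): + * + *	ia64_ptce_info_t ptce; + *	unsigned long addr; + *	int i, j; + * + *	ia64_get_ptce(&ptce); + *	addr = ptce.base; + *	for (i = 0; i < ptce.count[0]; ++i) { + *		for (j = 0; j < ptce.count[1]; ++j) { + *			ia64_ptce(addr);	(the ptc.e instruction) + *			addr += ptce.stride[1]; + *		} + *		addr += ptce.stride[0]; + *	}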
+ */ +typedef struct ia64_ptce_info_s { + u64 base; + u32 count[2]; + u32 stride[2]; +} ia64_ptce_info_t; + +/* Return the information required for the architected loop used to purge + * (initialize) the entire TC + */ +static inline s64 +ia64_get_ptce (ia64_ptce_info_t *ptce) +{ + struct ia64_pal_retval iprv; + + if (!ptce) + return -1; + + PAL_CALL(iprv, PAL_PTCE_INFO, 0, 0, 0); + if (iprv.status == 0) { + ptce->base = iprv.v0; + ptce->count[0] = iprv.v1 >> 32; + ptce->count[1] = iprv.v1 & 0xffffffff; + ptce->stride[0] = iprv.v2 >> 32; + ptce->stride[1] = iprv.v2 & 0xffffffff; + } + return iprv.status; +} + +/* Return info about implemented application and control registers. */ +static inline s64 +ia64_pal_register_info (u64 info_request, u64 *reg_info_1, u64 *reg_info_2) +{ + struct ia64_pal_retval iprv; + PAL_CALL(iprv, PAL_REGISTER_INFO, info_request, 0, 0); + if (reg_info_1) + *reg_info_1 = iprv.v0; + if (reg_info_2) + *reg_info_2 = iprv.v1; + return iprv.status; +} + +typedef union pal_hints_u { + u64 ph_data; + struct { + u64 si : 1, + li : 1, + reserved : 62; + } pal_hints_s; +} pal_hints_u_t; + +/* Return information about the register stack and RSE for this processor + * implementation. + */ +static inline s64 +ia64_pal_rse_info (u64 *num_phys_stacked, pal_hints_u_t *hints) +{ + struct ia64_pal_retval iprv; + PAL_CALL(iprv, PAL_RSE_INFO, 0, 0, 0); + if (num_phys_stacked) + *num_phys_stacked = iprv.v0; + if (hints) + hints->ph_data = iprv.v1; + return iprv.status; +} + +/* Cause the processor to enter SHUTDOWN state, where prefetching and execution are + * suspended, but cache and TLB coherency is maintained. + * This is usually called in IA-32 mode. + */ +static inline s64 +ia64_pal_shutdown (void) +{ + struct ia64_pal_retval iprv; + PAL_CALL(iprv, PAL_SHUTDOWN, 0, 0, 0); + return iprv.status; +} + +/* Perform the second phase of processor self-test. */ +static inline s64 +ia64_pal_test_proc (u64 test_addr, u64 test_size, u64 attributes, u64 *self_test_state) +{ + struct ia64_pal_retval iprv; + PAL_CALL(iprv, PAL_TEST_PROC, test_addr, test_size, attributes); + if (self_test_state) + *self_test_state = iprv.v0; + return iprv.status; +} + +typedef union pal_version_u { + u64 pal_version_val; + struct { + u64 pv_pal_b_rev : 8; + u64 pv_pal_b_model : 8; + u64 pv_reserved1 : 8; + u64 pv_pal_vendor : 8; + u64 pv_pal_a_rev : 8; + u64 pv_pal_a_model : 8; + u64 pv_reserved2 : 16; + } pal_version_s; +} pal_version_u_t; + + +/* Return PAL version information */ +static inline s64 +ia64_pal_version (pal_version_u_t *pal_min_version, pal_version_u_t *pal_cur_version) +{ + struct ia64_pal_retval iprv; + PAL_CALL_PHYS(iprv, PAL_VERSION, 0, 0, 0); + if (pal_min_version) + pal_min_version->pal_version_val = iprv.v0; + + if (pal_cur_version) + pal_cur_version->pal_version_val = iprv.v1; + + return iprv.status; +} + +typedef union pal_tc_info_u { + u64 pti_val; + struct { + u64 num_sets : 8, + associativity : 8, + num_entries : 16, + pf : 1, + unified : 1, + reduce_tr : 1, + reserved : 29; + } pal_tc_info_s; +} pal_tc_info_u_t; + +#define tc_reduce_tr pal_tc_info_s.reduce_tr +#define tc_unified pal_tc_info_s.unified +#define tc_pf pal_tc_info_s.pf +#define tc_num_entries pal_tc_info_s.num_entries +#define tc_associativity pal_tc_info_s.associativity +#define tc_num_sets pal_tc_info_s.num_sets + + +/* Return information about the virtual memory characteristics of the processor + * implementation.
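 + * + * Illustrative sketch (an editor's example, not from the original + * source): querying one TC level, with the level/type argument + * encodings assumed to follow the PAL_VM_INFO documentation: + * + *	pal_tc_info_u_t tc_info; + *	u64 tc_pages; + * + *	if (ia64_pal_vm_info(0, 1, &tc_info, &tc_pages) == 0) + *		printk("TC: %lu sets, %lu-way\n", + *		       (unsigned long) tc_info.pal_tc_info_s.num_sets, + *		       (unsigned long) tc_info.pal_tc_info_s.associativity);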
+ */ +static inline s64 +ia64_pal_vm_info (u64 tc_level, u64 tc_type, pal_tc_info_u_t *tc_info, u64 *tc_pages) +{ + struct ia64_pal_retval iprv; + PAL_CALL(iprv, PAL_VM_INFO, tc_level, tc_type, 0); + if (tc_info) + tc_info->pti_val = iprv.v0; + if (tc_pages) + *tc_pages = iprv.v1; + return iprv.status; +} + +/* Get page size information about the virtual memory characteristics of the processor + * implementation. + */ +static inline s64 +ia64_pal_vm_page_size (u64 *tr_pages, u64 *vw_pages) +{ + struct ia64_pal_retval iprv; + PAL_CALL(iprv, PAL_VM_PAGE_SIZE, 0, 0, 0); + if (tr_pages) + *tr_pages = iprv.v0; + if (vw_pages) + *vw_pages = iprv.v1; + return iprv.status; +} + +typedef union pal_vm_info_1_u { + u64 pvi1_val; + struct { + u64 vw : 1, + phys_add_size : 7, + key_size : 8, + max_pkr : 8, + hash_tag_id : 8, + max_dtr_entry : 8, + max_itr_entry : 8, + max_unique_tcs : 8, + num_tc_levels : 8; + } pal_vm_info_1_s; +} pal_vm_info_1_u_t; + +typedef union pal_vm_info_2_u { + u64 pvi2_val; + struct { + u64 impl_va_msb : 8, + rid_size : 8, + reserved : 48; + } pal_vm_info_2_s; +} pal_vm_info_2_u_t; + +/* Get summary information about the virtual memory characteristics of the processor + * implementation. + */ +static inline s64 +ia64_pal_vm_summary (pal_vm_info_1_u_t *vm_info_1, pal_vm_info_2_u_t *vm_info_2) +{ + struct ia64_pal_retval iprv; + PAL_CALL(iprv, PAL_VM_SUMMARY, 0, 0, 0); + if (vm_info_1) + vm_info_1->pvi1_val = iprv.v0; + if (vm_info_2) + vm_info_2->pvi2_val = iprv.v1; + return iprv.status; +} + +typedef union pal_itr_valid_u { + u64 piv_val; + struct { + u64 access_rights_valid : 1, + priv_level_valid : 1, + dirty_bit_valid : 1, + mem_attr_valid : 1, + reserved : 60; + } pal_tr_valid_s; +} pal_tr_valid_u_t; + +/* Read a translation register */ +static inline s64 +ia64_pal_tr_read (u64 reg_num, u64 tr_type, u64 *tr_buffer, pal_tr_valid_u_t *tr_valid) +{ + struct ia64_pal_retval iprv; + PAL_CALL_PHYS_STK(iprv, PAL_VM_TR_READ, reg_num, tr_type,(u64)ia64_tpa(tr_buffer)); + if (tr_valid) + tr_valid->piv_val = iprv.v0; + return iprv.status; +} + +/* + * PAL_PREFETCH_VISIBILITY transaction types + */ +#define PAL_VISIBILITY_VIRTUAL 0 +#define PAL_VISIBILITY_PHYSICAL 1 + +/* + * PAL_PREFETCH_VISIBILITY return codes + */ +#define PAL_VISIBILITY_OK 1 +#define PAL_VISIBILITY_OK_REMOTE_NEEDED 0 +#define PAL_VISIBILITY_INVAL_ARG -2 +#define PAL_VISIBILITY_ERROR -3 + +static inline s64 +ia64_pal_prefetch_visibility (s64 trans_type) +{ + struct ia64_pal_retval iprv; + PAL_CALL(iprv, PAL_PREFETCH_VISIBILITY, trans_type, 0, 0); + return iprv.status; +} + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_IA64_PAL_H */ diff --git a/include/asm-ia64/param.h b/include/asm-ia64/param.h new file mode 100644 index 000000000000..6c6b679b7a9e --- /dev/null +++ b/include/asm-ia64/param.h @@ -0,0 +1,42 @@ +#ifndef _ASM_IA64_PARAM_H +#define _ASM_IA64_PARAM_H + +/* + * Fundamental kernel parameters. + * + * Based on <asm-i386/param.h>. + * + * Modified 1998, 1999, 2002-2003 + * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co + */ + +#define EXEC_PAGESIZE 65536 + +#ifndef NOGROUP +# define NOGROUP (-1) +#endif + +#define MAXHOSTNAMELEN 64 /* max length of hostname */ + +#ifdef __KERNEL__ +# include <linux/config.h> /* mustn't include <linux/config.h> outside of #ifdef __KERNEL__ */ +# ifdef CONFIG_IA64_HP_SIM + /* + * Yeah, simulating stuff is slow, so let us catch some breath between + * timer interrupts... 
+ */ +# define HZ 32 +# else +# define HZ 1024 +# endif +# define USER_HZ HZ +# define CLOCKS_PER_SEC HZ /* frequency at which times() counts */ +#else + /* + * Technically, this is wrong, but some old apps still refer to it. The proper way to + * get the HZ value is via sysconf(_SC_CLK_TCK). + */ +# define HZ 1024 +#endif + +#endif /* _ASM_IA64_PARAM_H */ diff --git a/include/asm-ia64/parport.h b/include/asm-ia64/parport.h new file mode 100644 index 000000000000..67e16adfcd25 --- /dev/null +++ b/include/asm-ia64/parport.h @@ -0,0 +1,20 @@ +/* + * parport.h: platform-specific PC-style parport initialisation + * + * Copyright (C) 1999, 2000 Tim Waugh <tim@cyberelk.demon.co.uk> + * + * This file should only be included by drivers/parport/parport_pc.c. + */ + +#ifndef _ASM_IA64_PARPORT_H +#define _ASM_IA64_PARPORT_H 1 + +static int __devinit parport_pc_find_isa_ports (int autoirq, int autodma); + +static int __devinit +parport_pc_find_nonpci_ports (int autoirq, int autodma) +{ + return parport_pc_find_isa_ports(autoirq, autodma); +} + +#endif /* _ASM_IA64_PARPORT_H */ diff --git a/include/asm-ia64/patch.h b/include/asm-ia64/patch.h new file mode 100644 index 000000000000..4797f3535e6d --- /dev/null +++ b/include/asm-ia64/patch.h @@ -0,0 +1,25 @@ +#ifndef _ASM_IA64_PATCH_H +#define _ASM_IA64_PATCH_H + +/* + * Copyright (C) 2003 Hewlett-Packard Co + * David Mosberger-Tang <davidm@hpl.hp.com> + * + * There are a number of reasons for patching instructions. Rather than duplicating code + * all over the place, we put the common stuff here. Reasons for patching: in-kernel + * module-loader, virtual-to-physical patch-list, McKinley Errata 9 workaround, and gate + * shared library. Undoubtedly, some of these reasons will disappear and others will + * be added over time. + */ +#include <linux/elf.h> +#include <linux/types.h> + +extern void ia64_patch (u64 insn_addr, u64 mask, u64 val); /* patch any insn slot */ +extern void ia64_patch_imm64 (u64 insn_addr, u64 val); /* patch "movl" w/abs. value*/ +extern void ia64_patch_imm60 (u64 insn_addr, u64 val); /* patch "brl" w/ip-rel value */ + +extern void ia64_patch_mckinley_e9 (unsigned long start, unsigned long end); +extern void ia64_patch_vtop (unsigned long start, unsigned long end); +extern void ia64_patch_gate (void); + +#endif /* _ASM_IA64_PATCH_H */ diff --git a/include/asm-ia64/pci.h b/include/asm-ia64/pci.h new file mode 100644 index 000000000000..a8314ee4e7d2 --- /dev/null +++ b/include/asm-ia64/pci.h @@ -0,0 +1,141 @@ +#ifndef _ASM_IA64_PCI_H +#define _ASM_IA64_PCI_H + +#include <linux/mm.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/string.h> +#include <linux/types.h> + +#include <asm/io.h> +#include <asm/scatterlist.h> + +/* + * Can be used to override the logic in pci_scan_bus for skipping already-configured bus + * numbers - to be used for buggy BIOSes or architectures with incomplete PCI setup by the + * loader. + */ +#define pcibios_assign_all_busses() 0 +#define pcibios_scan_all_fns(a, b) 0 + +#define PCIBIOS_MIN_IO 0x1000 +#define PCIBIOS_MIN_MEM 0x10000000 + +void pcibios_config_init(void); + +struct pci_dev; + +/* + * PCI_DMA_BUS_IS_PHYS should be set to 1 if there is _necessarily_ a direct correspondence + * between device bus addresses and CPU physical addresses. Platforms with a hardware I/O + * MMU _must_ turn this off to suppress the bounce buffer handling code in the block and + * network device layers. 
Platforms with separate bus address spaces _must_ turn this off + * and provide a device DMA mapping implementation that takes care of the necessary + * address translation. + * + * For now, the ia64 platforms which may have separate/multiple bus address spaces all + * have I/O MMUs which support the merging of physically discontiguous buffers, so we can + * use that as the sole factor to determine the setting of PCI_DMA_BUS_IS_PHYS. + */ +extern unsigned long ia64_max_iommu_merge_mask; +#define PCI_DMA_BUS_IS_PHYS (ia64_max_iommu_merge_mask == ~0UL) + +static inline void +pcibios_set_master (struct pci_dev *dev) +{ + /* No special bus mastering setup handling */ +} + +static inline void +pcibios_penalize_isa_irq (int irq) +{ + /* We don't do dynamic PCI IRQ allocation */ +} + +#define HAVE_ARCH_PCI_MWI 1 +extern int pcibios_prep_mwi (struct pci_dev *); + +#include <asm-generic/pci-dma-compat.h> + +/* pci_unmap_{single,page} is not a nop, thus... */ +#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ + dma_addr_t ADDR_NAME; +#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ + __u32 LEN_NAME; +#define pci_unmap_addr(PTR, ADDR_NAME) \ + ((PTR)->ADDR_NAME) +#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ + (((PTR)->ADDR_NAME) = (VAL)) +#define pci_unmap_len(PTR, LEN_NAME) \ + ((PTR)->LEN_NAME) +#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ + (((PTR)->LEN_NAME) = (VAL)) + +/* The ia64 platform always supports 64-bit addressing. */ +#define pci_dac_dma_supported(pci_dev, mask) (1) +#define pci_dac_page_to_dma(dev,pg,off,dir) ((dma_addr_t) page_to_bus(pg) + (off)) +#define pci_dac_dma_to_page(dev,dma_addr) (virt_to_page(bus_to_virt(dma_addr))) +#define pci_dac_dma_to_offset(dev,dma_addr) offset_in_page(dma_addr) +#define pci_dac_dma_sync_single_for_cpu(dev,dma_addr,len,dir) do { } while (0) +#define pci_dac_dma_sync_single_for_device(dev,dma_addr,len,dir) do { mb(); } while (0) + +#define sg_dma_len(sg) ((sg)->dma_length) +#define sg_dma_address(sg) ((sg)->dma_address) + +#define HAVE_PCI_MMAP +extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma, + enum pci_mmap_state mmap_state, int write_combine); +#define HAVE_PCI_LEGACY +extern int pci_mmap_legacy_page_range(struct pci_bus *bus, + struct vm_area_struct *vma); +extern ssize_t pci_read_legacy_io(struct kobject *kobj, char *buf, loff_t off, + size_t count); +extern ssize_t pci_write_legacy_io(struct kobject *kobj, char *buf, loff_t off, + size_t count); +extern int pci_mmap_legacy_mem(struct kobject *kobj, + struct bin_attribute *attr, + struct vm_area_struct *vma); + +#define pci_get_legacy_mem platform_pci_get_legacy_mem +#define pci_legacy_read platform_pci_legacy_read +#define pci_legacy_write platform_pci_legacy_write + +struct pci_window { + struct resource resource; + u64 offset; +}; + +struct pci_controller { + void *acpi_handle; + void *iommu; + int segment; + + unsigned int windows; + struct pci_window *window; + + void *platform_data; +}; + +#define PCI_CONTROLLER(busdev) ((struct pci_controller *) busdev->sysdata) +#define pci_domain_nr(busdev) (PCI_CONTROLLER(busdev)->segment) + +extern struct pci_ops pci_root_ops; + +static inline int pci_proc_domain(struct pci_bus *bus) +{ + return (pci_domain_nr(bus) != 0); +} + +static inline void pcibios_add_platform_entries(struct pci_dev *dev) +{ +} + +extern void pcibios_resource_to_bus(struct pci_dev *dev, + struct pci_bus_region *region, struct resource *res); + +extern void pcibios_bus_to_resource(struct pci_dev *dev, + struct resource *res, struct pci_bus_region 
*region); + +#define pcibios_scan_all_fns(a, b) 0 + +#endif /* _ASM_IA64_PCI_H */ diff --git a/include/asm-ia64/percpu.h b/include/asm-ia64/percpu.h new file mode 100644 index 000000000000..1e87f19dad56 --- /dev/null +++ b/include/asm-ia64/percpu.h @@ -0,0 +1,72 @@ +#ifndef _ASM_IA64_PERCPU_H +#define _ASM_IA64_PERCPU_H + +/* + * Copyright (C) 2002-2003 Hewlett-Packard Co + * David Mosberger-Tang <davidm@hpl.hp.com> + */ + +#define PERCPU_ENOUGH_ROOM PERCPU_PAGE_SIZE + +#ifdef __ASSEMBLY__ +# define THIS_CPU(var) (per_cpu__##var) /* use this to mark accesses to per-CPU variables... */ +#else /* !__ASSEMBLY__ */ + +#include <linux/config.h> + +#include <linux/threads.h> + +#ifdef HAVE_MODEL_SMALL_ATTRIBUTE +# define __SMALL_ADDR_AREA __attribute__((__model__ (__small__))) +#else +# define __SMALL_ADDR_AREA +#endif + +#define DECLARE_PER_CPU(type, name) \ + extern __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name + +/* Separate out the type, so (int[3], foo) works. */ +#define DEFINE_PER_CPU(type, name) \ + __attribute__((__section__(".data.percpu"))) \ + __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name + +/* + * Pretty much a literal copy of asm-generic/percpu.h, except that percpu_modcopy() is an + * external routine, to avoid include-hell. + */ +#ifdef CONFIG_SMP + +extern unsigned long __per_cpu_offset[NR_CPUS]; + +/* Equal to __per_cpu_offset[smp_processor_id()], but faster to access: */ +DECLARE_PER_CPU(unsigned long, local_per_cpu_offset); + +#define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu])) +#define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __ia64_per_cpu_var(local_per_cpu_offset))) + +extern void percpu_modcopy(void *pcpudst, const void *src, unsigned long size); +extern void setup_per_cpu_areas (void); +extern void *per_cpu_init(void); + +#else /* ! SMP */ + +#define per_cpu(var, cpu) (*((void)cpu, &per_cpu__##var)) +#define __get_cpu_var(var) per_cpu__##var +#define per_cpu_init() (__phys_per_cpu_start) + +#endif /* SMP */ + +#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var) +#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var) + +/* + * Be extremely careful when taking the address of this variable! Due to virtual + * remapping, it is different from the canonical address returned by __get_cpu_var(var)! + * On the positive side, using __ia64_per_cpu_var() instead of __get_cpu_var() is slightly + * more efficient. 
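 + * + * Illustrative sketch (an editor's example, not from the original + * source): for a DEFINE_PER_CPU(int, foo), both forms reach the same + * storage, but through different addresses: + * + *	__get_cpu_var(foo) = 1;		(canonical, remapped address) + *	n = __ia64_per_cpu_var(foo);	(same variable, cheaper access) + * + * so pointers taken through the two forms must never be mixed or + * compared.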
+ */ +#define __ia64_per_cpu_var(var) (per_cpu__##var) + +#endif /* !__ASSEMBLY__ */ + +#endif /* _ASM_IA64_PERCPU_H */ diff --git a/include/asm-ia64/perfmon.h b/include/asm-ia64/perfmon.h new file mode 100644 index 000000000000..136c60e6bfcc --- /dev/null +++ b/include/asm-ia64/perfmon.h @@ -0,0 +1,259 @@ +/* + * Copyright (C) 2001-2003 Hewlett-Packard Co + * Stephane Eranian <eranian@hpl.hp.com> + */ + +#ifndef _ASM_IA64_PERFMON_H +#define _ASM_IA64_PERFMON_H + +/* + * perfmon commands supported on all CPU models + */ +#define PFM_WRITE_PMCS 0x01 +#define PFM_WRITE_PMDS 0x02 +#define PFM_READ_PMDS 0x03 +#define PFM_STOP 0x04 +#define PFM_START 0x05 +#define PFM_ENABLE 0x06 /* obsolete */ +#define PFM_DISABLE 0x07 /* obsolete */ +#define PFM_CREATE_CONTEXT 0x08 +#define PFM_DESTROY_CONTEXT 0x09 /* obsolete use close() */ +#define PFM_RESTART 0x0a +#define PFM_PROTECT_CONTEXT 0x0b /* obsolete */ +#define PFM_GET_FEATURES 0x0c +#define PFM_DEBUG 0x0d +#define PFM_UNPROTECT_CONTEXT 0x0e /* obsolete */ +#define PFM_GET_PMC_RESET_VAL 0x0f +#define PFM_LOAD_CONTEXT 0x10 +#define PFM_UNLOAD_CONTEXT 0x11 + +/* + * PMU model specific commands (may not be supported on all PMU models) + */ +#define PFM_WRITE_IBRS 0x20 +#define PFM_WRITE_DBRS 0x21 + +/* + * context flags + */ +#define PFM_FL_NOTIFY_BLOCK 0x01 /* block task on user level notifications */ +#define PFM_FL_SYSTEM_WIDE 0x02 /* create a system wide context */ +#define PFM_FL_OVFL_NO_MSG 0x80 /* do not post overflow/end messages for notification */ + +/* + * event set flags + */ +#define PFM_SETFL_EXCL_IDLE 0x01 /* exclude idle task (syswide only) XXX: DO NOT USE YET */ + +/* + * PMC flags + */ +#define PFM_REGFL_OVFL_NOTIFY 0x1 /* send notification on overflow */ +#define PFM_REGFL_RANDOM 0x2 /* randomize sampling interval */ + +/* + * PMD/PMC/IBR/DBR return flags (ignored on input) + * + * Those flags are used on output and must be checked in case EAGAIN is returned + * by any of the calls using a pfarg_reg_t or pfarg_dbreg_t structure.
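 + * + * Illustrative sketch (an editor's example, not from the original + * source): after an EAGAIN from perfmonctl(), a caller can scan its + * request vector for the entry that was rejected: + * + *	if (perfmonctl(fd, PFM_WRITE_PMCS, req, nreq) == -1 + *	    && errno == EAGAIN) { + *		for (i = 0; i < nreq; i++) + *			if (PFM_REG_HAS_ERROR(req[i].reg_flags)) + *				... req[i].reg_num was refused ... + *	}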
+ */ +#define PFM_REG_RETFL_NOTAVAIL (1UL<<31) /* set if register is implemented but not available */ +#define PFM_REG_RETFL_EINVAL (1UL<<30) /* set if register entry is invalid */ +#define PFM_REG_RETFL_MASK (PFM_REG_RETFL_NOTAVAIL|PFM_REG_RETFL_EINVAL) + +#define PFM_REG_HAS_ERROR(flag) (((flag) & PFM_REG_RETFL_MASK) != 0) + +typedef unsigned char pfm_uuid_t[16]; /* custom sampling buffer identifier type */ + +/* + * Request structure used to define a context + */ +typedef struct { + pfm_uuid_t ctx_smpl_buf_id; /* which buffer format to use (if needed) */ + unsigned long ctx_flags; /* noblock/block */ + unsigned short ctx_nextra_sets; /* number of extra event sets (you always get 1) */ + unsigned short ctx_reserved1; /* for future use */ + int ctx_fd; /* return arg: unique identification for context */ + void *ctx_smpl_vaddr; /* return arg: virtual address of sampling buffer, is used */ + unsigned long ctx_reserved2[11];/* for future use */ +} pfarg_context_t; + +/* + * Request structure used to write/read a PMC or PMD + */ +typedef struct { + unsigned int reg_num; /* which register */ + unsigned short reg_set; /* event set for this register */ + unsigned short reg_reserved1; /* for future use */ + + unsigned long reg_value; /* initial pmc/pmd value */ + unsigned long reg_flags; /* input: pmc/pmd flags, return: reg error */ + + unsigned long reg_long_reset; /* reset after buffer overflow notification */ + unsigned long reg_short_reset; /* reset after counter overflow */ + + unsigned long reg_reset_pmds[4]; /* which other counters to reset on overflow */ + unsigned long reg_random_seed; /* seed value when randomization is used */ + unsigned long reg_random_mask; /* bitmask used to limit random value */ + unsigned long reg_last_reset_val;/* return: PMD last reset value */ + + unsigned long reg_smpl_pmds[4]; /* which pmds are accessed when PMC overflows */ + unsigned long reg_smpl_eventid; /* opaque sampling event identifier */ + + unsigned long reg_reserved2[3]; /* for future use */ +} pfarg_reg_t; + +typedef struct { + unsigned int dbreg_num; /* which debug register */ + unsigned short dbreg_set; /* event set for this register */ + unsigned short dbreg_reserved1; /* for future use */ + unsigned long dbreg_value; /* value for debug register */ + unsigned long dbreg_flags; /* return: dbreg error */ + unsigned long dbreg_reserved2[1]; /* for future use */ +} pfarg_dbreg_t; + +typedef struct { + unsigned int ft_version; /* perfmon: major [16-31], minor [0-15] */ + unsigned int ft_reserved; /* reserved for future use */ + unsigned long reserved[4]; /* for future use */ +} pfarg_features_t; + +typedef struct { + pid_t load_pid; /* process to load the context into */ + unsigned short load_set; /* first event set to load */ + unsigned short load_reserved1; /* for future use */ + unsigned long load_reserved2[3]; /* for future use */ +} pfarg_load_t; + +typedef struct { + int msg_type; /* generic message header */ + int msg_ctx_fd; /* generic message header */ + unsigned long msg_ovfl_pmds[4]; /* which PMDs overflowed */ + unsigned short msg_active_set; /* active set at the time of overflow */ + unsigned short msg_reserved1; /* for future use */ + unsigned int msg_reserved2; /* for future use */ + unsigned long msg_tstamp; /* for perf tuning/debug */ +} pfm_ovfl_msg_t; + +typedef struct { + int msg_type; /* generic message header */ + int msg_ctx_fd; /* generic message header */ + unsigned long msg_tstamp; /* for perf tuning */ +} pfm_end_msg_t; + +typedef struct { + int msg_type; /* type of the 
message */ + int msg_ctx_fd; /* unique identifier for the context */ + unsigned long msg_tstamp; /* for perf tuning */ +} pfm_gen_msg_t; + +#define PFM_MSG_OVFL 1 /* an overflow happened */ +#define PFM_MSG_END 2 /* task to which context was attached ended */ + +typedef union { + pfm_ovfl_msg_t pfm_ovfl_msg; + pfm_end_msg_t pfm_end_msg; + pfm_gen_msg_t pfm_gen_msg; +} pfm_msg_t; + +/* + * Define the version numbers for both perfmon as a whole and the sampling buffer format. + */ +#define PFM_VERSION_MAJ 2U +#define PFM_VERSION_MIN 0U +#define PFM_VERSION (((PFM_VERSION_MAJ&0xffff)<<16)|(PFM_VERSION_MIN & 0xffff)) +#define PFM_VERSION_MAJOR(x) (((x)>>16) & 0xffff) +#define PFM_VERSION_MINOR(x) ((x) & 0xffff) + + +/* + * miscellaneous architected definitions + */ +#define PMU_FIRST_COUNTER 4 /* first counting monitor (PMC/PMD) */ +#define PMU_MAX_PMCS 256 /* maximum architected number of PMC registers */ +#define PMU_MAX_PMDS 256 /* maximum architected number of PMD registers */ + +#ifdef __KERNEL__ + +extern long perfmonctl(int fd, int cmd, void *arg, int narg); + +extern void pfm_save_regs (struct task_struct *); +extern void pfm_load_regs (struct task_struct *); + +extern void pfm_exit_thread(struct task_struct *); +extern int pfm_use_debug_registers(struct task_struct *); +extern int pfm_release_debug_registers(struct task_struct *); +extern void pfm_syst_wide_update_task(struct task_struct *, unsigned long info, int is_ctxswin); +extern void pfm_inherit(struct task_struct *task, struct pt_regs *regs); +extern void pfm_init_percpu(void); +extern void pfm_handle_work(void); + +/* + * Reset PMD register flags + */ +#define PFM_PMD_SHORT_RESET 0 +#define PFM_PMD_LONG_RESET 1 + +typedef union { + unsigned int val; + struct { + unsigned int notify_user:1; /* notify user program of overflow */ + unsigned int reset_ovfl_pmds:1; /* reset overflowed PMDs */ + unsigned int block_task:1; /* block monitored task on kernel exit */ + unsigned int mask_monitoring:1; /* mask monitors via PMCx.plm */ + unsigned int reserved:28; /* for future use */ + } bits; +} pfm_ovfl_ctrl_t; + +typedef struct { + unsigned char ovfl_pmd; /* index of overflowed PMD */ + unsigned char ovfl_notify; /* =1 if monitor requested overflow notification */ + unsigned short active_set; /* event set active at the time of the overflow */ + pfm_ovfl_ctrl_t ovfl_ctrl; /* return: perfmon controls to set by handler */ + + unsigned long pmd_last_reset; /* last reset value of the PMD */ + unsigned long smpl_pmds[4]; /* bitmask of other PMDs of interest on overflow */ + unsigned long smpl_pmds_values[PMU_MAX_PMDS]; /* values for the other PMDs of interest */ + unsigned long pmd_value; /* current 64-bit value of the PMD */ + unsigned long pmd_eventid; /* eventid associated with PMD */ +} pfm_ovfl_arg_t; + + +typedef struct { + char *fmt_name; + pfm_uuid_t fmt_uuid; + size_t fmt_arg_size; + unsigned long fmt_flags; + + int (*fmt_validate)(struct task_struct *task, unsigned int flags, int cpu, void *arg); + int (*fmt_getsize)(struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size); + int (*fmt_init)(struct task_struct *task, void *buf, unsigned int flags, int cpu, void *arg); + int (*fmt_handler)(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct pt_regs *regs, unsigned long stamp); + int (*fmt_restart)(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs); + int (*fmt_restart_active)(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs); +
int (*fmt_exit)(struct task_struct *task, void *buf, struct pt_regs *regs); + + struct list_head fmt_list; +} pfm_buffer_fmt_t; + +extern int pfm_register_buffer_fmt(pfm_buffer_fmt_t *fmt); +extern int pfm_unregister_buffer_fmt(pfm_uuid_t uuid); + +/* + * perfmon interface exported to modules + */ +extern int pfm_mod_read_pmds(struct task_struct *, void *req, unsigned int nreq, struct pt_regs *regs); +extern int pfm_mod_write_pmcs(struct task_struct *, void *req, unsigned int nreq, struct pt_regs *regs); +extern int pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs); +extern int pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs); + +/* + * describe the content of the local_cpu_data->pfm_syst_info field + */ +#define PFM_CPUINFO_SYST_WIDE 0x1 /* if set a system wide session exists */ +#define PFM_CPUINFO_DCR_PP 0x2 /* if set the system wide session has started */ +#define PFM_CPUINFO_EXCL_IDLE 0x4 /* the system wide session excludes the idle task */ + +#endif /* __KERNEL__ */ + +#endif /* _ASM_IA64_PERFMON_H */ diff --git a/include/asm-ia64/perfmon_default_smpl.h b/include/asm-ia64/perfmon_default_smpl.h new file mode 100644 index 000000000000..48822c0811d8 --- /dev/null +++ b/include/asm-ia64/perfmon_default_smpl.h @@ -0,0 +1,83 @@ +/* + * Copyright (C) 2002-2003 Hewlett-Packard Co + * Stephane Eranian <eranian@hpl.hp.com> + * + * This file implements the default sampling buffer format + * for Linux/ia64 perfmon subsystem. + */ +#ifndef __PERFMON_DEFAULT_SMPL_H__ +#define __PERFMON_DEFAULT_SMPL_H__ 1 + +#define PFM_DEFAULT_SMPL_UUID { \ + 0x4d, 0x72, 0xbe, 0xc0, 0x06, 0x64, 0x41, 0x43, 0x82, 0xb4, 0xd3, 0xfd, 0x27, 0x24, 0x3c, 0x97} + +/* + * format specific parameters (passed at context creation) + */ +typedef struct { + unsigned long buf_size; /* size of the buffer in bytes */ + unsigned int flags; /* buffer specific flags */ + unsigned int res1; /* for future use */ + unsigned long reserved[2]; /* for future use */ +} pfm_default_smpl_arg_t; + +/* + * combined context+format specific structure. Can be passed + * to PFM_CONTEXT_CREATE + */ +typedef struct { + pfarg_context_t ctx_arg; + pfm_default_smpl_arg_t buf_arg; +} pfm_default_smpl_ctx_arg_t; + +/* + * This header is at the beginning of the sampling buffer returned to the user. + * It is directly followed by the first record. + */ +typedef struct { + unsigned long hdr_count; /* how many valid entries */ + unsigned long hdr_cur_offs; /* current offset from top of buffer */ + unsigned long hdr_reserved2; /* reserved for future use */ + + unsigned long hdr_overflows; /* how many times the buffer overflowed */ + unsigned long hdr_buf_size; /* how many bytes in the buffer */ + + unsigned int hdr_version; /* contains perfmon version (smpl format diffs) */ + unsigned int hdr_reserved1; /* for future use */ + unsigned long hdr_reserved[10]; /* for future use */ +} pfm_default_smpl_hdr_t; + +/* + * Entry header in the sampling buffer. The header is directly followed + * with the values of the PMD registers of interest saved in increasing + * index order: PMD4, PMD5, and so on. How many PMDs are present depends + * on how the session was programmed. + * + * In the case where multiple counters overflow at the same time, multiple + * entries are written consecutively. + * + * last_reset_value member indicates the initial value of the overflowed PMD.
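 + * + * Illustrative sketch (an editor's example, not from the original + * source) of walking a filled buffer, assuming buf points at the mapped + * sampling area and npmds is the number of PMD values recorded per + * entry for this session: + * + *	pfm_default_smpl_hdr_t *hdr = buf; + *	pfm_default_smpl_entry_t *ent = (pfm_default_smpl_entry_t *)(hdr + 1); + *	unsigned long i; + * + *	for (i = 0; i < hdr->hdr_count; i++) { + *		... consume ent->ip, ent->ovfl_pmd and the trailing PMD values ... + *		ent = (void *)((char *)(ent + 1) + npmds * sizeof(unsigned long)); + *	}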
+ */ +typedef struct { + int pid; /* thread id (for NPTL, this is gettid()) */ + unsigned char reserved1[3]; /* reserved for future use */ + unsigned char ovfl_pmd; /* index of overflowed PMD */ + + unsigned long last_reset_val; /* initial value of overflowed PMD */ + unsigned long ip; /* where the overflow interrupt happened */ + unsigned long tstamp; /* ar.itc when entering perfmon intr. handler */ + + unsigned short cpu; /* cpu on which the overflow occurred */ + unsigned short set; /* event set active when overflow occurred */ + int tgid; /* thread group id (for NPTL, this is getpid()) */ +} pfm_default_smpl_entry_t; + +#define PFM_DEFAULT_MAX_PMDS 64 /* how many pmds supported by data structures (bits in an unsigned long) */ +#define PFM_DEFAULT_MAX_ENTRY_SIZE (sizeof(pfm_default_smpl_entry_t)+(sizeof(unsigned long)*PFM_DEFAULT_MAX_PMDS)) +#define PFM_DEFAULT_SMPL_MIN_BUF_SIZE (sizeof(pfm_default_smpl_hdr_t)+PFM_DEFAULT_MAX_ENTRY_SIZE) + +#define PFM_DEFAULT_SMPL_VERSION_MAJ 2U +#define PFM_DEFAULT_SMPL_VERSION_MIN 0U +#define PFM_DEFAULT_SMPL_VERSION (((PFM_DEFAULT_SMPL_VERSION_MAJ&0xffff)<<16)|(PFM_DEFAULT_SMPL_VERSION_MIN & 0xffff)) + +#endif /* __PERFMON_DEFAULT_SMPL_H__ */ diff --git a/include/asm-ia64/pgalloc.h b/include/asm-ia64/pgalloc.h new file mode 100644 index 000000000000..0f05dc8bd460 --- /dev/null +++ b/include/asm-ia64/pgalloc.h @@ -0,0 +1,167 @@ +#ifndef _ASM_IA64_PGALLOC_H +#define _ASM_IA64_PGALLOC_H + +/* + * This file contains the functions and defines necessary to allocate + * page tables. + * + * This hopefully works with any (fixed) ia-64 page-size, as defined + * in <asm/page.h> (currently 8192). + * + * Copyright (C) 1998-2001 Hewlett-Packard Co + * David Mosberger-Tang <davidm@hpl.hp.com> + * Copyright (C) 2000, Goutham Rao <goutham.rao@intel.com> + */ + +#include <linux/config.h> + +#include <linux/compiler.h> +#include <linux/mm.h> +#include <linux/page-flags.h> +#include <linux/threads.h> + +#include <asm/mmu_context.h> + +/* + * Very stupidly, we used to get new pgd's and pmd's, init their contents + * to point to the NULL versions of the next level page table, later on + * completely re-init them the same way, then free them up. This wasted + * a lot of work and caused unnecessary memory traffic. How broken... + * We fix this by caching them. + */ +#define pgd_quicklist (local_cpu_data->pgd_quick) +#define pmd_quicklist (local_cpu_data->pmd_quick) +#define pgtable_cache_size (local_cpu_data->pgtable_cache_sz) + +static inline pgd_t* +pgd_alloc_one_fast (struct mm_struct *mm) +{ + unsigned long *ret = NULL; + + preempt_disable(); + + ret = pgd_quicklist; + if (likely(ret != NULL)) { + pgd_quicklist = (unsigned long *)(*ret); + ret[0] = 0; + --pgtable_cache_size; + } else + ret = NULL; + + preempt_enable(); + + return (pgd_t *) ret; +} + +static inline pgd_t* +pgd_alloc (struct mm_struct *mm) +{ + /* the VM system never calls pgd_alloc_one_fast(), so we do it here.
*/ + pgd_t *pgd = pgd_alloc_one_fast(mm); + + if (unlikely(pgd == NULL)) { + pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO); + } + return pgd; +} + +static inline void +pgd_free (pgd_t *pgd) +{ + preempt_disable(); + *(unsigned long *)pgd = (unsigned long) pgd_quicklist; + pgd_quicklist = (unsigned long *) pgd; + ++pgtable_cache_size; + preempt_enable(); +} + +static inline void +pud_populate (struct mm_struct *mm, pud_t *pud_entry, pmd_t *pmd) +{ + pud_val(*pud_entry) = __pa(pmd); +} + +static inline pmd_t* +pmd_alloc_one_fast (struct mm_struct *mm, unsigned long addr) +{ + unsigned long *ret = NULL; + + preempt_disable(); + + ret = (unsigned long *)pmd_quicklist; + if (likely(ret != NULL)) { + pmd_quicklist = (unsigned long *)(*ret); + ret[0] = 0; + --pgtable_cache_size; + } + + preempt_enable(); + + return (pmd_t *)ret; +} + +static inline pmd_t* +pmd_alloc_one (struct mm_struct *mm, unsigned long addr) +{ + pmd_t *pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); + + return pmd; +} + +static inline void +pmd_free (pmd_t *pmd) +{ + preempt_disable(); + *(unsigned long *)pmd = (unsigned long) pmd_quicklist; + pmd_quicklist = (unsigned long *) pmd; + ++pgtable_cache_size; + preempt_enable(); +} + +#define __pmd_free_tlb(tlb, pmd) pmd_free(pmd) + +static inline void +pmd_populate (struct mm_struct *mm, pmd_t *pmd_entry, struct page *pte) +{ + pmd_val(*pmd_entry) = page_to_phys(pte); +} + +static inline void +pmd_populate_kernel (struct mm_struct *mm, pmd_t *pmd_entry, pte_t *pte) +{ + pmd_val(*pmd_entry) = __pa(pte); +} + +static inline struct page * +pte_alloc_one (struct mm_struct *mm, unsigned long addr) +{ + struct page *pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0); + + return pte; +} + +static inline pte_t * +pte_alloc_one_kernel (struct mm_struct *mm, unsigned long addr) +{ + pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); + + return pte; +} + +static inline void +pte_free (struct page *pte) +{ + __free_page(pte); +} + +static inline void +pte_free_kernel (pte_t *pte) +{ + free_page((unsigned long) pte); +} + +#define __pte_free_tlb(tlb, pte) tlb_remove_page((tlb), (pte)) + +extern void check_pgt_cache (void); + +#endif /* _ASM_IA64_PGALLOC_H */ diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h new file mode 100644 index 000000000000..1757a811f436 --- /dev/null +++ b/include/asm-ia64/pgtable.h @@ -0,0 +1,593 @@ +#ifndef _ASM_IA64_PGTABLE_H +#define _ASM_IA64_PGTABLE_H + +/* + * This file contains the functions and defines necessary to modify and use + * the IA-64 page table tree. + * + * This hopefully works with any (fixed) IA-64 page-size, as defined + * in <asm/page.h>. + * + * Copyright (C) 1998-2004 Hewlett-Packard Co + * David Mosberger-Tang <davidm@hpl.hp.com> + */ + +#include <linux/config.h> + +#include <asm/mman.h> +#include <asm/page.h> +#include <asm/processor.h> +#include <asm/system.h> +#include <asm/types.h> + +#define IA64_MAX_PHYS_BITS 50 /* max. number of physical address bits (architected) */ + +/* + * First, define the various bits in a PTE. Note that the PTE format + * matches the VHPT short format, the first doubleword of the VHPD long + * format, and the first doubleword of the TLB insertion format.
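 + * + * Illustrative sketch (an editor's example, not from the original + * source): a present, accessed, dirty, write-back, kernel RWX mapping + * of page frame pfn would be assembled from these bits roughly as + * + *	pte = _PAGE_P | _PAGE_A | _PAGE_D | _PAGE_MA_WB + *	      | _PAGE_PL_0 | _PAGE_AR_RWX + *	      | ((pfn << PAGE_SHIFT) & _PAGE_PPN_MASK); + * + * which is essentially what mk_pte(page, PAGE_KERNEL) below expands to.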
+ */ +#define _PAGE_P_BIT 0 +#define _PAGE_A_BIT 5 +#define _PAGE_D_BIT 6 + +#define _PAGE_P (1 << _PAGE_P_BIT) /* page present bit */ +#define _PAGE_MA_WB (0x0 << 2) /* write back memory attribute */ +#define _PAGE_MA_UC (0x4 << 2) /* uncacheable memory attribute */ +#define _PAGE_MA_UCE (0x5 << 2) /* UC exported attribute */ +#define _PAGE_MA_WC (0x6 << 2) /* write coalescing memory attribute */ +#define _PAGE_MA_NAT (0x7 << 2) /* not-a-thing attribute */ +#define _PAGE_MA_MASK (0x7 << 2) +#define _PAGE_PL_0 (0 << 7) /* privilege level 0 (kernel) */ +#define _PAGE_PL_1 (1 << 7) /* privilege level 1 (unused) */ +#define _PAGE_PL_2 (2 << 7) /* privilege level 2 (unused) */ +#define _PAGE_PL_3 (3 << 7) /* privilege level 3 (user) */ +#define _PAGE_PL_MASK (3 << 7) +#define _PAGE_AR_R (0 << 9) /* read only */ +#define _PAGE_AR_RX (1 << 9) /* read & execute */ +#define _PAGE_AR_RW (2 << 9) /* read & write */ +#define _PAGE_AR_RWX (3 << 9) /* read, write & execute */ +#define _PAGE_AR_R_RW (4 << 9) /* read / read & write */ +#define _PAGE_AR_RX_RWX (5 << 9) /* read & exec / read, write & exec */ +#define _PAGE_AR_RWX_RW (6 << 9) /* read, write & exec / read & write */ +#define _PAGE_AR_X_RX (7 << 9) /* exec & promote / read & exec */ +#define _PAGE_AR_MASK (7 << 9) +#define _PAGE_AR_SHIFT 9 +#define _PAGE_A (1 << _PAGE_A_BIT) /* page accessed bit */ +#define _PAGE_D (1 << _PAGE_D_BIT) /* page dirty bit */ +#define _PAGE_PPN_MASK (((__IA64_UL(1) << IA64_MAX_PHYS_BITS) - 1) & ~0xfffUL) +#define _PAGE_ED (__IA64_UL(1) << 52) /* exception deferral */ +#define _PAGE_PROTNONE (__IA64_UL(1) << 63) + +/* Valid only for a PTE with the present bit cleared: */ +#define _PAGE_FILE (1 << 1) /* see swap & file pte remarks below */ + +#define _PFN_MASK _PAGE_PPN_MASK +/* Mask of bits which may be changed by pte_modify(); the odd bits are there for _PAGE_PROTNONE */ +#define _PAGE_CHG_MASK (_PAGE_P | _PAGE_PROTNONE | _PAGE_PL_MASK | _PAGE_AR_MASK | _PAGE_ED) + +#define _PAGE_SIZE_4K 12 +#define _PAGE_SIZE_8K 13 +#define _PAGE_SIZE_16K 14 +#define _PAGE_SIZE_64K 16 +#define _PAGE_SIZE_256K 18 +#define _PAGE_SIZE_1M 20 +#define _PAGE_SIZE_4M 22 +#define _PAGE_SIZE_16M 24 +#define _PAGE_SIZE_64M 26 +#define _PAGE_SIZE_256M 28 +#define _PAGE_SIZE_1G 30 +#define _PAGE_SIZE_4G 32 + +#define __ACCESS_BITS _PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_MA_WB +#define __DIRTY_BITS_NO_ED _PAGE_A | _PAGE_P | _PAGE_D | _PAGE_MA_WB +#define __DIRTY_BITS _PAGE_ED | __DIRTY_BITS_NO_ED + +/* + * Definitions for first level: + * + * PGDIR_SHIFT determines what a first-level page table entry can map. + */ +#define PGDIR_SHIFT (PAGE_SHIFT + 2*(PAGE_SHIFT-3)) +#define PGDIR_SIZE (__IA64_UL(1) << PGDIR_SHIFT) +#define PGDIR_MASK (~(PGDIR_SIZE-1)) +#define PTRS_PER_PGD (1UL << (PAGE_SHIFT-3)) +#define USER_PTRS_PER_PGD (5*PTRS_PER_PGD/8) /* regions 0-4 are user regions */ +#define FIRST_USER_PGD_NR 0 + +/* + * Definitions for second level: + * + * PMD_SHIFT determines the size of the area a second-level page table + * can map. + */ +#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3)) +#define PMD_SIZE (1UL << PMD_SHIFT) +#define PMD_MASK (~(PMD_SIZE-1)) +#define PTRS_PER_PMD (1UL << (PAGE_SHIFT-3)) + +/* + * Definitions for third level: + */ +#define PTRS_PER_PTE (__IA64_UL(1) << (PAGE_SHIFT-3)) + +/* + * All the normal masks have the "page accessed" bits on, as any time + * they are used, the page is accessed. They are cleared only by the + * page-out routines. 
+ */ +#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_A) +#define PAGE_SHARED __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW) +#define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) +#define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) +#define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX) +#define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX) +#define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX) +#define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX) + +# ifndef __ASSEMBLY__ + +#include <asm/bitops.h> +#include <asm/cacheflush.h> +#include <asm/mmu_context.h> +#include <asm/processor.h> + +/* + * Next come the mappings that determine how mmap() protection bits + * (PROT_EXEC, PROT_READ, PROT_WRITE, PROT_NONE) get implemented. The + * _P version gets used for a private shared memory segment, the _S + * version gets used for a shared memory segment with MAP_SHARED on. + * In a private shared memory segment, we do a copy-on-write if a task + * attempts to write to the page. + */ + /* xwr */ +#define __P000 PAGE_NONE +#define __P001 PAGE_READONLY +#define __P010 PAGE_READONLY /* write to priv pg -> copy & make writable */ +#define __P011 PAGE_READONLY /* ditto */ +#define __P100 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX) +#define __P101 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX) +#define __P110 PAGE_COPY_EXEC +#define __P111 PAGE_COPY_EXEC + +#define __S000 PAGE_NONE +#define __S001 PAGE_READONLY +#define __S010 PAGE_SHARED /* we don't have (and don't need) write-only */ +#define __S011 PAGE_SHARED +#define __S100 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX) +#define __S101 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX) +#define __S110 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX) +#define __S111 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX) + +#define pgd_ERROR(e) printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e)) +#define pmd_ERROR(e) printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e)) +#define pte_ERROR(e) printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e)) + + +/* + * Some definitions to translate between mem_map, PTEs, and page addresses: + */ + + +/* Quick test to see if ADDR is a (potentially) valid physical address. */ +static inline long +ia64_phys_addr_valid (unsigned long addr) +{ + return (addr & (local_cpu_data->unimpl_pa_mask)) == 0; +} + +/* + * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel + * memory. For the return value to be meaningful, ADDR must be >= + * PAGE_OFFSET. This operation can be relatively expensive (e.g., + * require a hash-, or multi-level tree-lookup or something of that + * sort) but it guarantees to return TRUE only if accessing the page + * at that address does not cause an error. Note that there may be + * addresses for which kern_addr_valid() returns FALSE even though an + * access would not cause an error (e.g., this is typically true for + * memory mapped I/O regions). + * + * XXX Need to implement this for IA-64. + */ +#define kern_addr_valid(addr) (1) + + +/* + * Now come the defines and routines to manage and access the three-level + * page table. + */ + +/* + * On some architectures, special things need to be done when setting + * the PTE in a page table. Nothing special needs to be done on IA-64.
+ */ +#define set_pte(ptep, pteval) (*(ptep) = (pteval)) +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) + +#define RGN_SIZE (1UL << 61) +#define RGN_KERNEL 7 + +#define VMALLOC_START 0xa000000200000000UL +#ifdef CONFIG_VIRTUAL_MEM_MAP +# define VMALLOC_END_INIT (0xa000000000000000UL + (1UL << (4*PAGE_SHIFT - 9))) +# define VMALLOC_END vmalloc_end + extern unsigned long vmalloc_end; +#else +# define VMALLOC_END (0xa000000000000000UL + (1UL << (4*PAGE_SHIFT - 9))) +#endif + +/* fs/proc/kcore.c */ +#define kc_vaddr_to_offset(v) ((v) - 0xa000000000000000UL) +#define kc_offset_to_vaddr(o) ((o) + 0xa000000000000000UL) + +/* + * Conversion functions: convert page frame number (pfn) and a protection value to a page + * table entry (pte). + */ +#define pfn_pte(pfn, pgprot) \ +({ pte_t __pte; pte_val(__pte) = ((pfn) << PAGE_SHIFT) | pgprot_val(pgprot); __pte; }) + +/* Extract pfn from pte. */ +#define pte_pfn(_pte) ((pte_val(_pte) & _PFN_MASK) >> PAGE_SHIFT) + +#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) + +/* This takes a physical page address that is used by the remapping functions */ +#define mk_pte_phys(physpage, pgprot) \ +({ pte_t __pte; pte_val(__pte) = physpage + pgprot_val(pgprot); __pte; }) + +#define pte_modify(_pte, newprot) \ + (__pte((pte_val(_pte) & ~_PAGE_CHG_MASK) | (pgprot_val(newprot) & _PAGE_CHG_MASK))) + +#define page_pte_prot(page,prot) mk_pte(page, prot) +#define page_pte(page) page_pte_prot(page, __pgprot(0)) + +#define pte_none(pte) (!pte_val(pte)) +#define pte_present(pte) (pte_val(pte) & (_PAGE_P | _PAGE_PROTNONE)) +#define pte_clear(mm,addr,pte) (pte_val(*(pte)) = 0UL) +/* pte_page() returns the "struct page *" corresponding to the PTE: */ +#define pte_page(pte) virt_to_page(((pte_val(pte) & _PFN_MASK) + PAGE_OFFSET)) + +#define pmd_none(pmd) (!pmd_val(pmd)) +#define pmd_bad(pmd) (!ia64_phys_addr_valid(pmd_val(pmd))) +#define pmd_present(pmd) (pmd_val(pmd) != 0UL) +#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0UL) +#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & _PFN_MASK)) +#define pmd_page(pmd) virt_to_page((pmd_val(pmd) + PAGE_OFFSET)) + +#define pud_none(pud) (!pud_val(pud)) +#define pud_bad(pud) (!ia64_phys_addr_valid(pud_val(pud))) +#define pud_present(pud) (pud_val(pud) != 0UL) +#define pud_clear(pudp) (pud_val(*(pudp)) = 0UL) + +#define pud_page(pud) ((unsigned long) __va(pud_val(pud) & _PFN_MASK)) + +/* + * The following have defined behavior only if pte_present() is true.
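 + * + * Illustrative sketch (an editor's example, not from the original + * source): callers are expected to test pte_present() first, e.g.: + * + *	pte_t pte = *ptep; + * + *	if (pte_present(pte) && pte_dirty(pte)) + *		... the page has been written to ...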
+ */
+#define pte_user(pte) ((pte_val(pte) & _PAGE_PL_MASK) == _PAGE_PL_3)
+#define pte_read(pte) (((pte_val(pte) & _PAGE_AR_MASK) >> _PAGE_AR_SHIFT) < 6)
+#define pte_write(pte) ((unsigned) (((pte_val(pte) & _PAGE_AR_MASK) >> _PAGE_AR_SHIFT) - 2) <= 4)
+#define pte_exec(pte) ((pte_val(pte) & _PAGE_AR_RX) != 0)
+#define pte_dirty(pte) ((pte_val(pte) & _PAGE_D) != 0)
+#define pte_young(pte) ((pte_val(pte) & _PAGE_A) != 0)
+#define pte_file(pte) ((pte_val(pte) & _PAGE_FILE) != 0)
+/*
+ * Note: we convert AR_RWX to AR_RX and AR_RW to AR_R by clearing the 2nd bit in the
+ * access rights:
+ */
+#define pte_wrprotect(pte) (__pte(pte_val(pte) & ~_PAGE_AR_RW))
+#define pte_mkwrite(pte) (__pte(pte_val(pte) | _PAGE_AR_RW))
+#define pte_mkexec(pte) (__pte(pte_val(pte) | _PAGE_AR_RX))
+#define pte_mkold(pte) (__pte(pte_val(pte) & ~_PAGE_A))
+#define pte_mkyoung(pte) (__pte(pte_val(pte) | _PAGE_A))
+#define pte_mkclean(pte) (__pte(pte_val(pte) & ~_PAGE_D))
+#define pte_mkdirty(pte) (__pte(pte_val(pte) | _PAGE_D))
+
+/*
+ * Macro to mark a page protection value as "uncacheable". Note that "protection" is really a
+ * misnomer here as the protection value contains the memory attribute bits, dirty bits,
+ * and various other bits as well.
+ */
+#define pgprot_noncached(prot) __pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_UC)
+
+/*
+ * Macro to mark a page protection value as "write-combining".
+ * Note that "protection" is really a misnomer here as the protection
+ * value contains the memory attribute bits, dirty bits, and various
+ * other bits as well. Accesses through a write-combining translation
+ * bypass the caches, but do allow consecutive writes to be
+ * combined into single (but larger) write transactions.
+ */
+#define pgprot_writecombine(prot) __pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WC)
+
+static inline unsigned long
+pgd_index (unsigned long address)
+{
+	unsigned long region = address >> 61;
+	unsigned long l1index = (address >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1);
+
+	return (region << (PAGE_SHIFT - 6)) | l1index;
+}
+
+/* The offset in the 1-level directory is given by the 3 region bits
+ (61..63) and the level-1 bits. */
+static inline pgd_t*
+pgd_offset (struct mm_struct *mm, unsigned long address)
+{
+	return mm->pgd + pgd_index(address);
+}
+
+/* In the kernel's mapped region we completely ignore the region number
+ (since we know it's in region number 5). */
+#define pgd_offset_k(addr) \
+	(init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))
+
+/* Look up a pgd entry in the gate area. On IA-64, the gate-area
+ resides in the kernel-mapped segment, hence we use pgd_offset_k()
+ here. */
+#define pgd_offset_gate(mm, addr) pgd_offset_k(addr)
+
+/* Find an entry in the second-level page table. */
+#define pmd_offset(dir,addr) \
+	((pmd_t *) pud_page(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
+
+/*
+ * Find an entry in the third-level page table. This looks more complicated than it
+ * should be because some platforms place page tables in high memory.
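+ *
+ * Illustrative sketch (assumed usage, not part of the original
+ * header): a full translation strings the per-level lookups together,
+ *
+ *	pgd_t *pgd = pgd_offset(mm, addr);
+ *	pud_t *pud = pud_offset(pgd, addr);	/* folded by pgtable-nopud */
+ *	pmd_t *pmd = pmd_offset(pud, addr);
+ *	pte_t *pte = pte_offset_map(pmd, addr);
+ *	...
+ *	pte_unmap(pte);
+ *
+ * with each helper masking out just the index bits for its own level.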
+ */
+#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir,addr) ((pte_t *) pmd_page_kernel(*(dir)) + pte_index(addr))
+#define pte_offset_map(dir,addr) pte_offset_kernel(dir, addr)
+#define pte_offset_map_nested(dir,addr) pte_offset_map(dir, addr)
+#define pte_unmap(pte) do { } while (0)
+#define pte_unmap_nested(pte) do { } while (0)
+
+/* atomic versions of some of the PTE manipulations: */
+
+static inline int
+ptep_test_and_clear_young (struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
+{
+#ifdef CONFIG_SMP
+	if (!pte_young(*ptep))
+		return 0;
+	return test_and_clear_bit(_PAGE_A_BIT, ptep);
+#else
+	pte_t pte = *ptep;
+	if (!pte_young(pte))
+		return 0;
+	set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
+	return 1;
+#endif
+}
+
+static inline int
+ptep_test_and_clear_dirty (struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
+{
+#ifdef CONFIG_SMP
+	if (!pte_dirty(*ptep))
+		return 0;
+	return test_and_clear_bit(_PAGE_D_BIT, ptep);
+#else
+	pte_t pte = *ptep;
+	if (!pte_dirty(pte))
+		return 0;
+	set_pte_at(vma->vm_mm, addr, ptep, pte_mkclean(pte));
+	return 1;
+#endif
+}
+
+static inline pte_t
+ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+#ifdef CONFIG_SMP
+	return __pte(xchg((long *) ptep, 0));
+#else
+	pte_t pte = *ptep;
+	pte_clear(mm, addr, ptep);
+	return pte;
+#endif
+}
+
+static inline void
+ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+#ifdef CONFIG_SMP
+	unsigned long new, old;
+
+	do {
+		old = pte_val(*ptep);
+		new = pte_val(pte_wrprotect(__pte (old)));
+	} while (cmpxchg((unsigned long *) ptep, old, new) != old);
+#else
+	pte_t old_pte = *ptep;
+	set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
+#endif
+}
+
+static inline int
+pte_same (pte_t a, pte_t b)
+{
+	return pte_val(a) == pte_val(b);
+}
+
+#define update_mmu_cache(vma, address, pte) do { } while (0)
+
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+extern void paging_init (void);
+
+/*
+ * Note: The macros below rely on the fact that MAX_SWAPFILES_SHIFT <= number of
+ * bits in the swap-type field of the swap pte. It would be nice to
+ * enforce that, but we can't easily include <linux/swap.h> here.
+ * (Of course, better still would be to define MAX_SWAPFILES_SHIFT here...).
+ *
+ * Format of swap pte:
+ *	bit 0 : present bit (must be zero)
+ *	bit 1 : _PAGE_FILE (must be zero)
+ *	bits 2- 8: swap-type
+ *	bits 9-62: swap offset
+ *	bit 63 : _PAGE_PROTNONE bit
+ *
+ * Format of file pte:
+ *	bit 0 : present bit (must be zero)
+ *	bit 1 : _PAGE_FILE (must be one)
+ *	bits 2-62: file_offset/PAGE_SIZE
+ *	bit 63 : _PAGE_PROTNONE bit
+ */
+#define __swp_type(entry) (((entry).val >> 2) & 0x7f)
+#define __swp_offset(entry) (((entry).val << 1) >> 10)
+#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 2) | ((long) (offset) << 9) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+#define PTE_FILE_MAX_BITS 61
+#define pte_to_pgoff(pte) ((pte_val(pte) << 1) >> 3)
+#define pgoff_to_pte(off) ((pte_t) { ((off) << 2) | _PAGE_FILE })
+
+/* XXX is this right? */
+#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
+	remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
+
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+	remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+#define MK_IOSPACE_PFN(space, pfn) (pfn)
+#define GET_IOSPACE(pfn) 0
+#define GET_PFN(pfn) (pfn)
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
+extern struct page *zero_page_memmap_ptr;
+#define ZERO_PAGE(vaddr) (zero_page_memmap_ptr)
+
+/* We provide our own get_unmapped_area to cope with VA holes for userland */
+#define HAVE_ARCH_UNMAPPED_AREA
+
+#ifdef CONFIG_HUGETLB_PAGE
+#define HUGETLB_PGDIR_SHIFT (HPAGE_SHIFT + 2*(PAGE_SHIFT-3))
+#define HUGETLB_PGDIR_SIZE (__IA64_UL(1) << HUGETLB_PGDIR_SHIFT)
+#define HUGETLB_PGDIR_MASK (~(HUGETLB_PGDIR_SIZE-1))
+struct mmu_gather;
+extern void hugetlb_free_pgtables(struct mmu_gather *tlb,
+	struct vm_area_struct * prev, unsigned long start, unsigned long end);
+#endif
+
+/*
+ * IA-64 doesn't have any external MMU info: the page tables contain all the necessary
+ * information. However, we use this routine to take care of any (delayed) i-cache
+ * flushing that may be necessary.
+ */
+extern void lazy_mmu_prot_update (pte_t pte);
+
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+/*
+ * Update PTEP with ENTRY, which is guaranteed to be a less
+ * restrictive PTE. That is, ENTRY may have the ACCESSED, DIRTY, and
+ * WRITABLE bits turned on, when the value at PTEP did not. The
+ * WRITABLE bit may only be turned on if SAFELY_WRITABLE is TRUE.
+ *
+ * SAFELY_WRITABLE is TRUE if we can update the value at PTEP without
+ * having to worry about races. On SMP machines, there are only two
+ * cases where this is true:
+ *
+ *	(1) *PTEP has the PRESENT bit turned OFF
+ *	(2) ENTRY has the DIRTY bit turned ON
+ *
+ * On ia64, we could implement this routine with a cmpxchg()-loop
+ * which ORs in the _PAGE_A/_PAGE_D bit if they're set in ENTRY.
+ * However, like on x86, we can get a more streamlined version by
+ * observing that it is OK to drop ACCESSED bit updates when
+ * SAFELY_WRITABLE is FALSE. Besides being rare, all that would do is
+ * result in an extra Access-bit fault, which would then turn on the
+ * ACCESSED bit in the low-level fault handler (iaccess_bit or
+ * daccess_bit in ivt.S).
+ */
+#ifdef CONFIG_SMP
+# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
+do { \
+	if (__safely_writable) { \
+		set_pte(__ptep, __entry); \
+		flush_tlb_page(__vma, __addr); \
+	} \
+} while (0)
+#else
+# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
+	ptep_establish(__vma, __addr, __ptep, __entry)
+#endif
+
+# ifdef CONFIG_VIRTUAL_MEM_MAP
+  /* arch mem_map init routine is needed due to holes in a virtual mem_map */
+#  define __HAVE_ARCH_MEMMAP_INIT
+  extern void memmap_init (unsigned long size, int nid, unsigned long zone,
+			   unsigned long start_pfn);
+# endif /* CONFIG_VIRTUAL_MEM_MAP */
+# endif /* !__ASSEMBLY__ */
+
+/*
+ * Identity-mapped regions use a large page size. We'll call such large pages
+ * "granules". If you can think of a better name that's unambiguous, let me
+ * know...
+ */ +#if defined(CONFIG_IA64_GRANULE_64MB) +# define IA64_GRANULE_SHIFT _PAGE_SIZE_64M +#elif defined(CONFIG_IA64_GRANULE_16MB) +# define IA64_GRANULE_SHIFT _PAGE_SIZE_16M +#endif +#define IA64_GRANULE_SIZE (1 << IA64_GRANULE_SHIFT) +/* + * log2() of the page size we use to map the kernel image (IA64_TR_KERNEL): + */ +#define KERNEL_TR_PAGE_SHIFT _PAGE_SIZE_64M +#define KERNEL_TR_PAGE_SIZE (1 << KERNEL_TR_PAGE_SHIFT) + +/* + * No page table caches to initialise + */ +#define pgtable_cache_init() do { } while (0) + +/* These tell get_user_pages() that the first gate page is accessible from user-level. */ +#define FIXADDR_USER_START GATE_ADDR +#define FIXADDR_USER_END (GATE_ADDR + 2*PERCPU_PAGE_SIZE) + +#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG +#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY +#define __HAVE_ARCH_PTEP_GET_AND_CLEAR +#define __HAVE_ARCH_PTEP_SET_WRPROTECT +#define __HAVE_ARCH_PTE_SAME +#define __HAVE_ARCH_PGD_OFFSET_GATE +#define __HAVE_ARCH_LAZY_MMU_PROT_UPDATE + +/* + * Override for pgd_addr_end() to deal with the virtual address space holes + * in each region. In regions 0..4 virtual address bits are used like this: + * +--------+------+--------+-----+-----+--------+ + * | pgdhi3 | rsvd | pgdlow | pmd | pte | offset | + * +--------+------+--------+-----+-----+--------+ + * 'pgdlow' overflows to pgdhi3 (a.k.a. region bits) leaving rsvd==0 + */ +#define IA64_PGD_OVERFLOW (PGDIR_SIZE << (PAGE_SHIFT-6)) + +#define pgd_addr_end(addr, end) \ +({ unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \ + if (REGION_NUMBER(__boundary) < 5 && \ + __boundary & IA64_PGD_OVERFLOW) \ + __boundary += (RGN_SIZE - 1) & ~(IA64_PGD_OVERFLOW - 1);\ + (__boundary - 1 < (end) - 1)? __boundary: (end); \ +}) + +#define pmd_addr_end(addr, end) \ +({ unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \ + if (REGION_NUMBER(__boundary) < 5 && \ + __boundary & IA64_PGD_OVERFLOW) \ + __boundary += (RGN_SIZE - 1) & ~(IA64_PGD_OVERFLOW - 1);\ + (__boundary - 1 < (end) - 1)? __boundary: (end); \ +}) + +#include <asm-generic/pgtable-nopud.h> +#include <asm-generic/pgtable.h> + +#endif /* _ASM_IA64_PGTABLE_H */ diff --git a/include/asm-ia64/poll.h b/include/asm-ia64/poll.h new file mode 100644 index 000000000000..160258a0528d --- /dev/null +++ b/include/asm-ia64/poll.h @@ -0,0 +1,31 @@ +#ifndef _ASM_IA64_POLL_H +#define _ASM_IA64_POLL_H + +/* + * poll(2) bit definitions. Based on <asm-i386/poll.h>. + * + * Modified 1998, 1999, 2002 + * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co + */ + +#define POLLIN 0x0001 +#define POLLPRI 0x0002 +#define POLLOUT 0x0004 +#define POLLERR 0x0008 +#define POLLHUP 0x0010 +#define POLLNVAL 0x0020 + +#define POLLRDNORM 0x0040 +#define POLLRDBAND 0x0080 +#define POLLWRNORM 0x0100 +#define POLLWRBAND 0x0200 +#define POLLMSG 0x0400 +#define POLLREMOVE 0x1000 + +struct pollfd { + int fd; + short events; + short revents; +}; + +#endif /* _ASM_IA64_POLL_H */ diff --git a/include/asm-ia64/posix_types.h b/include/asm-ia64/posix_types.h new file mode 100644 index 000000000000..adb62272694f --- /dev/null +++ b/include/asm-ia64/posix_types.h @@ -0,0 +1,126 @@ +#ifndef _ASM_IA64_POSIX_TYPES_H +#define _ASM_IA64_POSIX_TYPES_H + +/* + * This file is generally used by user-level software, so you need to + * be a little careful about namespace pollution etc. Also, we cannot + * assume GCC is being used. + * + * Based on <asm-alpha/posix_types.h>. 
+ * + * Modified 1998-2000, 2003 + * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co + */ + +typedef unsigned long __kernel_ino_t; +typedef unsigned int __kernel_mode_t; +typedef unsigned int __kernel_nlink_t; +typedef long __kernel_off_t; +typedef long long __kernel_loff_t; +typedef int __kernel_pid_t; +typedef int __kernel_ipc_pid_t; +typedef unsigned int __kernel_uid_t; +typedef unsigned int __kernel_gid_t; +typedef unsigned long __kernel_size_t; +typedef long __kernel_ssize_t; +typedef long __kernel_ptrdiff_t; +typedef long __kernel_time_t; +typedef long __kernel_suseconds_t; +typedef long __kernel_clock_t; +typedef int __kernel_timer_t; +typedef int __kernel_clockid_t; +typedef int __kernel_daddr_t; +typedef char * __kernel_caddr_t; +typedef unsigned long __kernel_sigset_t; /* at least 32 bits */ +typedef unsigned short __kernel_uid16_t; +typedef unsigned short __kernel_gid16_t; + +typedef struct { + int val[2]; +} __kernel_fsid_t; + +typedef __kernel_uid_t __kernel_old_uid_t; +typedef __kernel_gid_t __kernel_old_gid_t; +typedef __kernel_uid_t __kernel_uid32_t; +typedef __kernel_gid_t __kernel_gid32_t; + +typedef unsigned int __kernel_old_dev_t; + +# ifdef __KERNEL__ + +# ifndef __GNUC__ + +#define __FD_SET(d, set) ((set)->fds_bits[__FDELT(d)] |= __FDMASK(d)) +#define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d)) +#define __FD_ISSET(d, set) (((set)->fds_bits[__FDELT(d)] & __FDMASK(d)) != 0) +#define __FD_ZERO(set) \ + ((void) memset ((__ptr_t) (set), 0, sizeof (__kernel_fd_set))) + +# else /* !__GNUC__ */ + +/* With GNU C, use inline functions instead so args are evaluated only once: */ + +#undef __FD_SET +static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp) +{ + unsigned long _tmp = fd / __NFDBITS; + unsigned long _rem = fd % __NFDBITS; + fdsetp->fds_bits[_tmp] |= (1UL<<_rem); +} + +#undef __FD_CLR +static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp) +{ + unsigned long _tmp = fd / __NFDBITS; + unsigned long _rem = fd % __NFDBITS; + fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem); +} + +#undef __FD_ISSET +static __inline__ int __FD_ISSET(unsigned long fd, const __kernel_fd_set *p) +{ + unsigned long _tmp = fd / __NFDBITS; + unsigned long _rem = fd % __NFDBITS; + return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0; +} + +/* + * This will unroll the loop for the normal constant case (8 ints, + * for a 256-bit fd_set) + */ +#undef __FD_ZERO +static __inline__ void __FD_ZERO(__kernel_fd_set *p) +{ + unsigned long *tmp = p->fds_bits; + int i; + + if (__builtin_constant_p(__FDSET_LONGS)) { + switch (__FDSET_LONGS) { + case 16: + tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0; + tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0; + tmp[ 8] = 0; tmp[ 9] = 0; tmp[10] = 0; tmp[11] = 0; + tmp[12] = 0; tmp[13] = 0; tmp[14] = 0; tmp[15] = 0; + return; + + case 8: + tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0; + tmp[ 4] = 0; tmp[ 5] = 0; tmp[ 6] = 0; tmp[ 7] = 0; + return; + + case 4: + tmp[ 0] = 0; tmp[ 1] = 0; tmp[ 2] = 0; tmp[ 3] = 0; + return; + } + } + i = __FDSET_LONGS; + while (i) { + i--; + *tmp = 0; + tmp++; + } +} + +# endif /* !__GNUC__ */ +# endif /* __KERNEL__ */ +#endif /* _ASM_IA64_POSIX_TYPES_H */ diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h new file mode 100644 index 000000000000..8769dd9df369 --- /dev/null +++ b/include/asm-ia64/processor.h @@ -0,0 +1,698 @@ +#ifndef _ASM_IA64_PROCESSOR_H +#define _ASM_IA64_PROCESSOR_H + +/* + * Copyright (C) 1998-2004 Hewlett-Packard Co + * David 
Mosberger-Tang <davidm@hpl.hp.com>
+ * Stephane Eranian <eranian@hpl.hp.com>
+ * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
+ * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
+ *
+ * 11/24/98 S.Eranian added ia64_set_iva()
+ * 12/03/99 D. Mosberger implement thread_saved_pc() via kernel unwind API
+ * 06/16/00 A. Mallick added csd/ssd/tssd for ia32 support
+ */
+
+#include <linux/config.h>
+
+#include <asm/intrinsics.h>
+#include <asm/kregs.h>
+#include <asm/ptrace.h>
+#include <asm/ustack.h>
+
+/* Our arch specific arch_init_sched_domain is in arch/ia64/kernel/domain.c */
+#define ARCH_HAS_SCHED_DOMAIN
+
+#define IA64_NUM_DBG_REGS 8
+/*
+ * Limits for PMC and PMD are set to less than maximum architected values
+ * but should be sufficient for a while
+ */
+#define IA64_NUM_PMC_REGS 32
+#define IA64_NUM_PMD_REGS 32
+
+#define DEFAULT_MAP_BASE __IA64_UL_CONST(0x2000000000000000)
+#define DEFAULT_TASK_SIZE __IA64_UL_CONST(0xa000000000000000)
+
+/*
+ * TASK_SIZE is really a misnomer: it is the maximum user space
+ * address (plus one). On IA-64, there are five regions of 2TB
+ * each (assuming 8KB page size), for a total of 8TB of user virtual
+ * address space.
+ */
+#define TASK_SIZE (current->thread.task_size)
+
+/*
+ * MM_VM_SIZE(mm) gives the maximum address (plus 1) which may contain a mapping for
+ * address-space MM. Note that with 32-bit tasks, this is still DEFAULT_TASK_SIZE,
+ * because the kernel may have installed helper-mappings above TASK_SIZE. For example,
+ * for x86 emulation, the LDT and GDT are mapped above TASK_SIZE.
+ */
+#define MM_VM_SIZE(mm) DEFAULT_TASK_SIZE
+
+/*
+ * This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE (current->thread.map_base)
+
+#define IA64_THREAD_FPH_VALID (__IA64_UL(1) << 0) /* floating-point high state valid? */
+#define IA64_THREAD_DBG_VALID (__IA64_UL(1) << 1) /* debug registers valid? */
+#define IA64_THREAD_PM_VALID (__IA64_UL(1) << 2) /* performance registers valid? */
+#define IA64_THREAD_UAC_NOPRINT (__IA64_UL(1) << 3) /* don't log unaligned accesses */
+#define IA64_THREAD_UAC_SIGBUS (__IA64_UL(1) << 4) /* generate SIGBUS on unaligned acc. */
+	/* bit 5 is currently unused */
+#define IA64_THREAD_FPEMU_NOPRINT (__IA64_UL(1) << 6) /* don't log any fpswa faults */
+#define IA64_THREAD_FPEMU_SIGFPE (__IA64_UL(1) << 7) /* send a SIGFPE for fpswa faults */
+
+#define IA64_THREAD_UAC_SHIFT 3
+#define IA64_THREAD_UAC_MASK (IA64_THREAD_UAC_NOPRINT | IA64_THREAD_UAC_SIGBUS)
+#define IA64_THREAD_FPEMU_SHIFT 6
+#define IA64_THREAD_FPEMU_MASK (IA64_THREAD_FPEMU_NOPRINT | IA64_THREAD_FPEMU_SIGFPE)
+
+
+/*
+ * This shift should be large enough to be able to represent 1000000000/itc_freq with good
+ * accuracy while being small enough to fit 10*1000000000<<IA64_NSEC_PER_CYC_SHIFT in 64 bits
+ * (this will give enough slack to represent 10 seconds worth of time as a scaled number).
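+ *
+ * Worked example (illustrative only): for itc_freq = 1 GHz,
+ * nsec_per_cyc = (1000000000 << 30) / 1000000000 = 2^30, and a
+ * cycle count converts to nanoseconds as
+ *
+ *	nsec = (cycles * nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT;
+ *
+ * which for a 1 GHz clock reproduces the cycle count exactly.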
+ */ +#define IA64_NSEC_PER_CYC_SHIFT 30 + +#ifndef __ASSEMBLY__ + +#include <linux/cache.h> +#include <linux/compiler.h> +#include <linux/threads.h> +#include <linux/types.h> + +#include <asm/fpu.h> +#include <asm/page.h> +#include <asm/percpu.h> +#include <asm/rse.h> +#include <asm/unwind.h> +#include <asm/atomic.h> +#ifdef CONFIG_NUMA +#include <asm/nodedata.h> +#endif + +/* like above but expressed as bitfields for more efficient access: */ +struct ia64_psr { + __u64 reserved0 : 1; + __u64 be : 1; + __u64 up : 1; + __u64 ac : 1; + __u64 mfl : 1; + __u64 mfh : 1; + __u64 reserved1 : 7; + __u64 ic : 1; + __u64 i : 1; + __u64 pk : 1; + __u64 reserved2 : 1; + __u64 dt : 1; + __u64 dfl : 1; + __u64 dfh : 1; + __u64 sp : 1; + __u64 pp : 1; + __u64 di : 1; + __u64 si : 1; + __u64 db : 1; + __u64 lp : 1; + __u64 tb : 1; + __u64 rt : 1; + __u64 reserved3 : 4; + __u64 cpl : 2; + __u64 is : 1; + __u64 mc : 1; + __u64 it : 1; + __u64 id : 1; + __u64 da : 1; + __u64 dd : 1; + __u64 ss : 1; + __u64 ri : 2; + __u64 ed : 1; + __u64 bn : 1; + __u64 reserved4 : 19; +}; + +/* + * CPU type, hardware bug flags, and per-CPU state. Frequently used + * state comes earlier: + */ +struct cpuinfo_ia64 { + __u32 softirq_pending; + __u64 itm_delta; /* # of clock cycles between clock ticks */ + __u64 itm_next; /* interval timer mask value to use for next clock tick */ + __u64 nsec_per_cyc; /* (1000000000<<IA64_NSEC_PER_CYC_SHIFT)/itc_freq */ + __u64 unimpl_va_mask; /* mask of unimplemented virtual address bits (from PAL) */ + __u64 unimpl_pa_mask; /* mask of unimplemented physical address bits (from PAL) */ + __u64 *pgd_quick; + __u64 *pmd_quick; + __u64 pgtable_cache_sz; + __u64 itc_freq; /* frequency of ITC counter */ + __u64 proc_freq; /* frequency of processor */ + __u64 cyc_per_usec; /* itc_freq/1000000 */ + __u64 ptce_base; + __u32 ptce_count[2]; + __u32 ptce_stride[2]; + struct task_struct *ksoftirqd; /* kernel softirq daemon for this CPU */ + +#ifdef CONFIG_SMP + __u64 loops_per_jiffy; + int cpu; +#endif + + /* CPUID-derived information: */ + __u64 ppn; + __u64 features; + __u8 number; + __u8 revision; + __u8 model; + __u8 family; + __u8 archrev; + char vendor[16]; + +#ifdef CONFIG_NUMA + struct ia64_node_data *node_data; +#endif +}; + +DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info); + +/* + * The "local" data variable. It refers to the per-CPU data of the currently executing + * CPU, much like "current" points to the per-task data of the currently executing task. + * Do not use the address of local_cpu_data, since it will be different from + * cpu_data(smp_processor_id())! 
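+ *
+ * (Illustrative note: on CPU 3, local_cpu_data->itc_freq and
+ * cpu_data(3)->itc_freq name the same per-CPU datum, but the two
+ * expressions go through different address computations -- hence the
+ * warning above about taking the address of local_cpu_data.)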
+ */ +#define local_cpu_data (&__ia64_per_cpu_var(cpu_info)) +#define cpu_data(cpu) (&per_cpu(cpu_info, cpu)) + +extern void identify_cpu (struct cpuinfo_ia64 *); +extern void print_cpu_info (struct cpuinfo_ia64 *); + +typedef struct { + unsigned long seg; +} mm_segment_t; + +#define SET_UNALIGN_CTL(task,value) \ +({ \ + (task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_UAC_MASK) \ + | (((value) << IA64_THREAD_UAC_SHIFT) & IA64_THREAD_UAC_MASK)); \ + 0; \ +}) +#define GET_UNALIGN_CTL(task,addr) \ +({ \ + put_user(((task)->thread.flags & IA64_THREAD_UAC_MASK) >> IA64_THREAD_UAC_SHIFT, \ + (int __user *) (addr)); \ +}) + +#define SET_FPEMU_CTL(task,value) \ +({ \ + (task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_FPEMU_MASK) \ + | (((value) << IA64_THREAD_FPEMU_SHIFT) & IA64_THREAD_FPEMU_MASK)); \ + 0; \ +}) +#define GET_FPEMU_CTL(task,addr) \ +({ \ + put_user(((task)->thread.flags & IA64_THREAD_FPEMU_MASK) >> IA64_THREAD_FPEMU_SHIFT, \ + (int __user *) (addr)); \ +}) + +#ifdef CONFIG_IA32_SUPPORT +struct desc_struct { + unsigned int a, b; +}; + +#define desc_empty(desc) (!((desc)->a + (desc)->b)) +#define desc_equal(desc1, desc2) (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b)) + +#define GDT_ENTRY_TLS_ENTRIES 3 +#define GDT_ENTRY_TLS_MIN 6 +#define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1) + +#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8) + +struct partial_page_list; +#endif + +struct thread_struct { + __u32 flags; /* various thread flags (see IA64_THREAD_*) */ + /* writing on_ustack is performance-critical, so it's worth spending 8 bits on it... */ + __u8 on_ustack; /* executing on user-stacks? */ + __u8 pad[3]; + __u64 ksp; /* kernel stack pointer */ + __u64 map_base; /* base address for get_unmapped_area() */ + __u64 task_size; /* limit for task size */ + __u64 rbs_bot; /* the base address for the RBS */ + int last_fph_cpu; /* CPU that may hold the contents of f32-f127 */ + +#ifdef CONFIG_IA32_SUPPORT + __u64 eflag; /* IA32 EFLAGS reg */ + __u64 fsr; /* IA32 floating pt status reg */ + __u64 fcr; /* IA32 floating pt control reg */ + __u64 fir; /* IA32 fp except. instr. reg */ + __u64 fdr; /* IA32 fp except. data reg */ + __u64 old_k1; /* old value of ar.k1 */ + __u64 old_iob; /* old IOBase value */ + struct partial_page_list *ppl; /* partial page list for 4K page size issue */ + /* cached TLS descriptors. 
*/ + struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; + +# define INIT_THREAD_IA32 .eflag = 0, \ + .fsr = 0, \ + .fcr = 0x17800000037fULL, \ + .fir = 0, \ + .fdr = 0, \ + .old_k1 = 0, \ + .old_iob = 0, \ + .ppl = NULL, +#else +# define INIT_THREAD_IA32 +#endif /* CONFIG_IA32_SUPPORT */ +#ifdef CONFIG_PERFMON + __u64 pmcs[IA64_NUM_PMC_REGS]; + __u64 pmds[IA64_NUM_PMD_REGS]; + void *pfm_context; /* pointer to detailed PMU context */ + unsigned long pfm_needs_checking; /* when >0, pending perfmon work on kernel exit */ +# define INIT_THREAD_PM .pmcs = {0UL, }, \ + .pmds = {0UL, }, \ + .pfm_context = NULL, \ + .pfm_needs_checking = 0UL, +#else +# define INIT_THREAD_PM +#endif + __u64 dbr[IA64_NUM_DBG_REGS]; + __u64 ibr[IA64_NUM_DBG_REGS]; + struct ia64_fpreg fph[96]; /* saved/loaded on demand */ +}; + +#define INIT_THREAD { \ + .flags = 0, \ + .on_ustack = 0, \ + .ksp = 0, \ + .map_base = DEFAULT_MAP_BASE, \ + .rbs_bot = STACK_TOP - DEFAULT_USER_STACK_SIZE, \ + .task_size = DEFAULT_TASK_SIZE, \ + .last_fph_cpu = -1, \ + INIT_THREAD_IA32 \ + INIT_THREAD_PM \ + .dbr = {0, }, \ + .ibr = {0, }, \ + .fph = {{{{0}}}, } \ +} + +#define start_thread(regs,new_ip,new_sp) do { \ + set_fs(USER_DS); \ + regs->cr_ipsr = ((regs->cr_ipsr | (IA64_PSR_BITS_TO_SET | IA64_PSR_CPL)) \ + & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS)); \ + regs->cr_iip = new_ip; \ + regs->ar_rsc = 0xf; /* eager mode, privilege level 3 */ \ + regs->ar_rnat = 0; \ + regs->ar_bspstore = current->thread.rbs_bot; \ + regs->ar_fpsr = FPSR_DEFAULT; \ + regs->loadrs = 0; \ + regs->r8 = current->mm->dumpable; /* set "don't zap registers" flag */ \ + regs->r12 = new_sp - 16; /* allocate 16 byte scratch area */ \ + if (unlikely(!current->mm->dumpable)) { \ + /* \ + * Zap scratch regs to avoid leaking bits between processes with different \ + * uid/privileges. \ + */ \ + regs->ar_pfs = 0; regs->b0 = 0; regs->pr = 0; \ + regs->r1 = 0; regs->r9 = 0; regs->r11 = 0; regs->r13 = 0; regs->r15 = 0; \ + } \ +} while (0) + +/* Forward declarations, a strange C thing... */ +struct mm_struct; +struct task_struct; + +/* + * Free all resources held by a thread. This is called after the + * parent of DEAD_TASK has collected the exit status of the task via + * wait(). + */ +#define release_thread(dead_task) + +/* Prepare to copy thread state - unlazy all lazy status */ +#define prepare_to_copy(tsk) do { } while (0) + +/* + * This is the mechanism for creating a new kernel thread. + * + * NOTE 1: Only a kernel-only process (ie the swapper or direct + * descendants who haven't done an "execve()") should use this: it + * will work within a system call from a "real" process, but the + * process memory space will not be free'd until both the parent and + * the child have exited. + * + * NOTE 2: This MUST NOT be an inlined function. Otherwise, we get + * into trouble in init/main.c when the child thread returns to + * do_basic_setup() and the timing is such that free_initmem() has + * been called already. + */ +extern pid_t kernel_thread (int (*fn)(void *), void *arg, unsigned long flags); + +/* Get wait channel for task P. */ +extern unsigned long get_wchan (struct task_struct *p); + +/* Return instruction pointer of blocked task TSK. */ +#define KSTK_EIP(tsk) \ + ({ \ + struct pt_regs *_regs = ia64_task_regs(tsk); \ + _regs->cr_iip + ia64_psr(_regs)->ri; \ + }) + +/* Return stack pointer of blocked task TSK. 
*/ +#define KSTK_ESP(tsk) ((tsk)->thread.ksp) + +extern void ia64_getreg_unknown_kr (void); +extern void ia64_setreg_unknown_kr (void); + +#define ia64_get_kr(regnum) \ +({ \ + unsigned long r = 0; \ + \ + switch (regnum) { \ + case 0: r = ia64_getreg(_IA64_REG_AR_KR0); break; \ + case 1: r = ia64_getreg(_IA64_REG_AR_KR1); break; \ + case 2: r = ia64_getreg(_IA64_REG_AR_KR2); break; \ + case 3: r = ia64_getreg(_IA64_REG_AR_KR3); break; \ + case 4: r = ia64_getreg(_IA64_REG_AR_KR4); break; \ + case 5: r = ia64_getreg(_IA64_REG_AR_KR5); break; \ + case 6: r = ia64_getreg(_IA64_REG_AR_KR6); break; \ + case 7: r = ia64_getreg(_IA64_REG_AR_KR7); break; \ + default: ia64_getreg_unknown_kr(); break; \ + } \ + r; \ +}) + +#define ia64_set_kr(regnum, r) \ +({ \ + switch (regnum) { \ + case 0: ia64_setreg(_IA64_REG_AR_KR0, r); break; \ + case 1: ia64_setreg(_IA64_REG_AR_KR1, r); break; \ + case 2: ia64_setreg(_IA64_REG_AR_KR2, r); break; \ + case 3: ia64_setreg(_IA64_REG_AR_KR3, r); break; \ + case 4: ia64_setreg(_IA64_REG_AR_KR4, r); break; \ + case 5: ia64_setreg(_IA64_REG_AR_KR5, r); break; \ + case 6: ia64_setreg(_IA64_REG_AR_KR6, r); break; \ + case 7: ia64_setreg(_IA64_REG_AR_KR7, r); break; \ + default: ia64_setreg_unknown_kr(); break; \ + } \ +}) + +/* + * The following three macros can't be inline functions because we don't have struct + * task_struct at this point. + */ + +/* Return TRUE if task T owns the fph partition of the CPU we're running on. */ +#define ia64_is_local_fpu_owner(t) \ +({ \ + struct task_struct *__ia64_islfo_task = (t); \ + (__ia64_islfo_task->thread.last_fph_cpu == smp_processor_id() \ + && __ia64_islfo_task == (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER)); \ +}) + +/* Mark task T as owning the fph partition of the CPU we're running on. */ +#define ia64_set_local_fpu_owner(t) do { \ + struct task_struct *__ia64_slfo_task = (t); \ + __ia64_slfo_task->thread.last_fph_cpu = smp_processor_id(); \ + ia64_set_kr(IA64_KR_FPU_OWNER, (unsigned long) __ia64_slfo_task); \ +} while (0) + +/* Mark the fph partition of task T as being invalid on all CPUs. */ +#define ia64_drop_fpu(t) ((t)->thread.last_fph_cpu = -1) + +extern void __ia64_init_fpu (void); +extern void __ia64_save_fpu (struct ia64_fpreg *fph); +extern void __ia64_load_fpu (struct ia64_fpreg *fph); +extern void ia64_save_debug_regs (unsigned long *save_area); +extern void ia64_load_debug_regs (unsigned long *save_area); + +#ifdef CONFIG_IA32_SUPPORT +extern void ia32_save_state (struct task_struct *task); +extern void ia32_load_state (struct task_struct *task); +#endif + +#define ia64_fph_enable() do { ia64_rsm(IA64_PSR_DFH); ia64_srlz_d(); } while (0) +#define ia64_fph_disable() do { ia64_ssm(IA64_PSR_DFH); ia64_srlz_d(); } while (0) + +/* load fp 0.0 into fph */ +static inline void +ia64_init_fpu (void) { + ia64_fph_enable(); + __ia64_init_fpu(); + ia64_fph_disable(); +} + +/* save f32-f127 at FPH */ +static inline void +ia64_save_fpu (struct ia64_fpreg *fph) { + ia64_fph_enable(); + __ia64_save_fpu(fph); + ia64_fph_disable(); +} + +/* load f32-f127 from FPH */ +static inline void +ia64_load_fpu (struct ia64_fpreg *fph) { + ia64_fph_enable(); + __ia64_load_fpu(fph); + ia64_fph_disable(); +} + +static inline __u64 +ia64_clear_ic (void) +{ + __u64 psr; + psr = ia64_getreg(_IA64_REG_PSR); + ia64_stop(); + ia64_rsm(IA64_PSR_I | IA64_PSR_IC); + ia64_srlz_i(); + return psr; +} + +/* + * Restore the psr. 
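+ *
+ * Illustrative pairing (a sketch, not from the original source):
+ * callers typically bracket a translation insertion as
+ *
+ *	psr = ia64_clear_ic();
+ *	ia64_itr(0x1, tr_num, vaddr, pte, log_page_size);
+ *	ia64_set_psr(psr);
+ *	ia64_srlz_i();
+ *
+ * so that interruption collection stays off while the insertion
+ * (see ia64_itr() below) is in flight.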
+ */ +static inline void +ia64_set_psr (__u64 psr) +{ + ia64_stop(); + ia64_setreg(_IA64_REG_PSR_L, psr); + ia64_srlz_d(); +} + +/* + * Insert a translation into an instruction and/or data translation + * register. + */ +static inline void +ia64_itr (__u64 target_mask, __u64 tr_num, + __u64 vmaddr, __u64 pte, + __u64 log_page_size) +{ + ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2)); + ia64_setreg(_IA64_REG_CR_IFA, vmaddr); + ia64_stop(); + if (target_mask & 0x1) + ia64_itri(tr_num, pte); + if (target_mask & 0x2) + ia64_itrd(tr_num, pte); +} + +/* + * Insert a translation into the instruction and/or data translation + * cache. + */ +static inline void +ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte, + __u64 log_page_size) +{ + ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2)); + ia64_setreg(_IA64_REG_CR_IFA, vmaddr); + ia64_stop(); + /* as per EAS2.6, itc must be the last instruction in an instruction group */ + if (target_mask & 0x1) + ia64_itci(pte); + if (target_mask & 0x2) + ia64_itcd(pte); +} + +/* + * Purge a range of addresses from instruction and/or data translation + * register(s). + */ +static inline void +ia64_ptr (__u64 target_mask, __u64 vmaddr, __u64 log_size) +{ + if (target_mask & 0x1) + ia64_ptri(vmaddr, (log_size << 2)); + if (target_mask & 0x2) + ia64_ptrd(vmaddr, (log_size << 2)); +} + +/* Set the interrupt vector address. The address must be suitably aligned (32KB). */ +static inline void +ia64_set_iva (void *ivt_addr) +{ + ia64_setreg(_IA64_REG_CR_IVA, (__u64) ivt_addr); + ia64_srlz_i(); +} + +/* Set the page table address and control bits. */ +static inline void +ia64_set_pta (__u64 pta) +{ + /* Note: srlz.i implies srlz.d */ + ia64_setreg(_IA64_REG_CR_PTA, pta); + ia64_srlz_i(); +} + +static inline void +ia64_eoi (void) +{ + ia64_setreg(_IA64_REG_CR_EOI, 0); + ia64_srlz_d(); +} + +#define cpu_relax() ia64_hint(ia64_hint_pause) + +static inline void +ia64_set_lrr0 (unsigned long val) +{ + ia64_setreg(_IA64_REG_CR_LRR0, val); + ia64_srlz_d(); +} + +static inline void +ia64_set_lrr1 (unsigned long val) +{ + ia64_setreg(_IA64_REG_CR_LRR1, val); + ia64_srlz_d(); +} + + +/* + * Given the address to which a spill occurred, return the unat bit + * number that corresponds to this address. + */ +static inline __u64 +ia64_unat_pos (void *spill_addr) +{ + return ((__u64) spill_addr >> 3) & 0x3f; +} + +/* + * Set the NaT bit of an integer register which was spilled at address + * SPILL_ADDR. UNAT is the mask to be updated. + */ +static inline void +ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat) +{ + __u64 bit = ia64_unat_pos(spill_addr); + __u64 mask = 1UL << bit; + + *unat = (*unat & ~mask) | (nat << bit); +} + +/* + * Return saved PC of a blocked thread. + * Note that the only way T can block is through a call to schedule() -> switch_to(). + */ +static inline unsigned long +thread_saved_pc (struct task_struct *t) +{ + struct unw_frame_info info; + unsigned long ip; + + unw_init_from_blocked_task(&info, t); + if (unw_unwind(&info) < 0) + return 0; + unw_get_ip(&info, &ip); + return ip; +} + +/* + * Get the current instruction/program counter value. 
+ */ +#define current_text_addr() \ + ({ void *_pc; _pc = (void *)ia64_getreg(_IA64_REG_IP); _pc; }) + +static inline __u64 +ia64_get_ivr (void) +{ + __u64 r; + ia64_srlz_d(); + r = ia64_getreg(_IA64_REG_CR_IVR); + ia64_srlz_d(); + return r; +} + +static inline void +ia64_set_dbr (__u64 regnum, __u64 value) +{ + __ia64_set_dbr(regnum, value); +#ifdef CONFIG_ITANIUM + ia64_srlz_d(); +#endif +} + +static inline __u64 +ia64_get_dbr (__u64 regnum) +{ + __u64 retval; + + retval = __ia64_get_dbr(regnum); +#ifdef CONFIG_ITANIUM + ia64_srlz_d(); +#endif + return retval; +} + +static inline __u64 +ia64_rotr (__u64 w, __u64 n) +{ + return (w >> n) | (w << (64 - n)); +} + +#define ia64_rotl(w,n) ia64_rotr((w), (64) - (n)) + +/* + * Take a mapped kernel address and return the equivalent address + * in the region 7 identity mapped virtual area. + */ +static inline void * +ia64_imva (void *addr) +{ + void *result; + result = (void *) ia64_tpa(addr); + return __va(result); +} + +#define ARCH_HAS_PREFETCH +#define ARCH_HAS_PREFETCHW +#define ARCH_HAS_SPINLOCK_PREFETCH +#define PREFETCH_STRIDE L1_CACHE_BYTES + +static inline void +prefetch (const void *x) +{ + ia64_lfetch(ia64_lfhint_none, x); +} + +static inline void +prefetchw (const void *x) +{ + ia64_lfetch_excl(ia64_lfhint_none, x); +} + +#define spin_lock_prefetch(x) prefetchw(x) + +extern unsigned long boot_option_idle_override; + +#endif /* !__ASSEMBLY__ */ + +#endif /* _ASM_IA64_PROCESSOR_H */ diff --git a/include/asm-ia64/ptrace.h b/include/asm-ia64/ptrace.h new file mode 100644 index 000000000000..0bef19538406 --- /dev/null +++ b/include/asm-ia64/ptrace.h @@ -0,0 +1,337 @@ +#ifndef _ASM_IA64_PTRACE_H +#define _ASM_IA64_PTRACE_H + +/* + * Copyright (C) 1998-2004 Hewlett-Packard Co + * David Mosberger-Tang <davidm@hpl.hp.com> + * Stephane Eranian <eranian@hpl.hp.com> + * Copyright (C) 2003 Intel Co + * Suresh Siddha <suresh.b.siddha@intel.com> + * Fenghua Yu <fenghua.yu@intel.com> + * Arun Sharma <arun.sharma@intel.com> + * + * 12/07/98 S. Eranian added pt_regs & switch_stack + * 12/21/98 D. Mosberger updated to match latest code + * 6/17/99 D. Mosberger added second unat member to "struct switch_stack" + * + */ +/* + * When a user process is blocked, its state looks as follows: + * + * +----------------------+ ------- IA64_STK_OFFSET + * | | ^ + * | struct pt_regs | | + * | | | + * +----------------------+ | + * | | | + * | memory stack | | + * | (growing downwards) | | + * //.....................// | + * | + * //.....................// | + * | | | + * +----------------------+ | + * | struct switch_stack | | + * | | | + * +----------------------+ | + * | | | + * //.....................// | + * | + * //.....................// | + * | | | + * | register stack | | + * | (growing upwards) | | + * | | | + * +----------------------+ | --- IA64_RBS_OFFSET + * | struct thread_info | | ^ + * +----------------------+ | | + * | | | | + * | struct task_struct | | | + * current -> | | | | + * +----------------------+ ------- + * + * Note that ar.ec is not saved explicitly in pt_reg or switch_stack. + * This is because ar.ec is saved as part of ar.pfs. 
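+ *
+ * Worked example (illustrative, derived from the constants below):
+ * with 8KB pages, KERNEL_STACK_SIZE_ORDER is 2, so IA64_STK_OFFSET
+ * is 4*8KB = 32KB; pt_regs then lives just below the 32KB mark of
+ * the task allocation, with task_struct and thread_info at its base.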
+ */
+
+#include <linux/config.h>
+
+#include <asm/fpu.h>
+#include <asm/offsets.h>
+
+/*
+ * Base-2 logarithm of number of pages to allocate per task structure
+ * (including register backing store and memory stack):
+ */
+#if defined(CONFIG_IA64_PAGE_SIZE_4KB)
+# define KERNEL_STACK_SIZE_ORDER 3
+#elif defined(CONFIG_IA64_PAGE_SIZE_8KB)
+# define KERNEL_STACK_SIZE_ORDER 2
+#elif defined(CONFIG_IA64_PAGE_SIZE_16KB)
+# define KERNEL_STACK_SIZE_ORDER 1
+#else
+# define KERNEL_STACK_SIZE_ORDER 0
+#endif
+
+#define IA64_RBS_OFFSET ((IA64_TASK_SIZE + IA64_THREAD_INFO_SIZE + 15) & ~15)
+#define IA64_STK_OFFSET ((1 << KERNEL_STACK_SIZE_ORDER)*PAGE_SIZE)
+
+#define KERNEL_STACK_SIZE IA64_STK_OFFSET
+
+#ifndef __ASSEMBLY__
+
+#include <asm/current.h>
+#include <asm/page.h>
+
+/*
+ * This struct defines the way the registers are saved on system
+ * calls.
+ *
+ * We don't save all floating point registers because the kernel
+ * is compiled to use only a very small subset, so the others are
+ * untouched.
+ *
+ * THIS STRUCTURE MUST BE A MULTIPLE OF 16 BYTES IN SIZE
+ * (because the memory stack pointer MUST ALWAYS be aligned this way)
+ *
+ */
+struct pt_regs {
+	/* The following registers are saved by SAVE_MIN: */
+	unsigned long b6; /* scratch */
+	unsigned long b7; /* scratch */
+
+	unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */
+	unsigned long ar_ssd; /* reserved for future use (scratch) */
+
+	unsigned long r8; /* scratch (return value register 0) */
+	unsigned long r9; /* scratch (return value register 1) */
+	unsigned long r10; /* scratch (return value register 2) */
+	unsigned long r11; /* scratch (return value register 3) */
+
+	unsigned long cr_ipsr; /* interrupted task's psr */
+	unsigned long cr_iip; /* interrupted task's instruction pointer */
+	/*
+	 * interrupted task's function state; if bit 63 is cleared, it
+	 * contains syscall's ar.pfs.pfm:
+	 */
+	unsigned long cr_ifs;
+
+	unsigned long ar_unat; /* interrupted task's NaT register (preserved) */
+	unsigned long ar_pfs; /* prev function state */
+	unsigned long ar_rsc; /* RSE configuration */
+	/* The following two are valid only if cr_ipsr.cpl > 0: */
+	unsigned long ar_rnat; /* RSE NaT */
+	unsigned long ar_bspstore; /* RSE bspstore */
+
+	unsigned long pr; /* 64 predicate registers (1 bit each) */
+	unsigned long b0; /* return pointer (bp) */
+	unsigned long loadrs; /* size of dirty partition << 16 */
+
+	unsigned long r1; /* the gp pointer */
+	unsigned long r12; /* interrupted task's memory stack pointer */
+	unsigned long r13; /* thread pointer */
+
+	unsigned long ar_fpsr; /* floating point status (preserved) */
+	unsigned long r15; /* scratch */
+
+	/* The remaining registers are NOT saved for system calls. */
+
+	unsigned long r14; /* scratch */
+	unsigned long r2; /* scratch */
+	unsigned long r3; /* scratch */
+
+	/* The following registers are saved by SAVE_REST: */
+	unsigned long r16; /* scratch */
+	unsigned long r17; /* scratch */
+	unsigned long r18; /* scratch */
+	unsigned long r19; /* scratch */
+	unsigned long r20; /* scratch */
+	unsigned long r21; /* scratch */
+	unsigned long r22; /* scratch */
+	unsigned long r23; /* scratch */
+	unsigned long r24; /* scratch */
+	unsigned long r25; /* scratch */
+	unsigned long r26; /* scratch */
+	unsigned long r27; /* scratch */
+	unsigned long r28; /* scratch */
+	unsigned long r29; /* scratch */
+	unsigned long r30; /* scratch */
+	unsigned long r31; /* scratch */
+
+	unsigned long ar_ccv; /* compare/exchange value (scratch) */
+
+	/*
+	 * Floating point registers that the kernel considers scratch:
+	 */
+	struct ia64_fpreg f6; /* scratch */
+	struct ia64_fpreg f7; /* scratch */
+	struct ia64_fpreg f8; /* scratch */
+	struct ia64_fpreg f9; /* scratch */
+	struct ia64_fpreg f10; /* scratch */
+	struct ia64_fpreg f11; /* scratch */
+};
+
+/*
+ * This structure contains the additional registers that need to be
+ * preserved across a context switch. This generally consists of
+ * "preserved" registers.
+ */
+struct switch_stack {
+	unsigned long caller_unat; /* user NaT collection register (preserved) */
+	unsigned long ar_fpsr; /* floating-point status register */
+
+	struct ia64_fpreg f2; /* preserved */
+	struct ia64_fpreg f3; /* preserved */
+	struct ia64_fpreg f4; /* preserved */
+	struct ia64_fpreg f5; /* preserved */
+
+	struct ia64_fpreg f12; /* scratch, but untouched by kernel */
+	struct ia64_fpreg f13; /* scratch, but untouched by kernel */
+	struct ia64_fpreg f14; /* scratch, but untouched by kernel */
+	struct ia64_fpreg f15; /* scratch, but untouched by kernel */
+	struct ia64_fpreg f16; /* preserved */
+	struct ia64_fpreg f17; /* preserved */
+	struct ia64_fpreg f18; /* preserved */
+	struct ia64_fpreg f19; /* preserved */
+	struct ia64_fpreg f20; /* preserved */
+	struct ia64_fpreg f21; /* preserved */
+	struct ia64_fpreg f22; /* preserved */
+	struct ia64_fpreg f23; /* preserved */
+	struct ia64_fpreg f24; /* preserved */
+	struct ia64_fpreg f25; /* preserved */
+	struct ia64_fpreg f26; /* preserved */
+	struct ia64_fpreg f27; /* preserved */
+	struct ia64_fpreg f28; /* preserved */
+	struct ia64_fpreg f29; /* preserved */
+	struct ia64_fpreg f30; /* preserved */
+	struct ia64_fpreg f31; /* preserved */
+
+	unsigned long r4; /* preserved */
+	unsigned long r5; /* preserved */
+	unsigned long r6; /* preserved */
+	unsigned long r7; /* preserved */
+
+	unsigned long b0; /* so we can force a direct return in copy_thread */
+	unsigned long b1;
+	unsigned long b2;
+	unsigned long b3;
+	unsigned long b4;
+	unsigned long b5;
+
+	unsigned long ar_pfs; /* previous function state */
+	unsigned long ar_lc; /* loop counter (preserved) */
+	unsigned long ar_unat; /* NaT bits for r4-r7 */
+	unsigned long ar_rnat; /* RSE NaT collection register */
+	unsigned long ar_bspstore; /* RSE dirty base (preserved) */
+	unsigned long pr; /* 64 predicate registers (1 bit each) */
+};
+
+#ifdef __KERNEL__
+/*
+ * We use ia64_psr(regs)->ri to determine which of the three
+ * instructions in a bundle (16 bytes) took the sample. Generate
+ * the canonical representation by adding the slot number to the
+ * instruction pointer.
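+ *
+ * Worked example (illustrative): for a fault in slot 2 of the bundle
+ * at 0xa000000000010000, cr_iip holds the bundle address and
+ * psr.ri = 2, so instruction_pointer() below yields
+ * 0xa000000000010002.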
+ */ +# define instruction_pointer(regs) ((regs)->cr_iip + ia64_psr(regs)->ri) +/* Conserve space in histogram by encoding slot bits in address + * bits 2 and 3 rather than bits 0 and 1. + */ +#define profile_pc(regs) \ +({ \ + unsigned long __ip = instruction_pointer(regs); \ + (__ip & ~3UL) + ((__ip & 3UL) << 2); \ +}) + + /* given a pointer to a task_struct, return the user's pt_regs */ +# define ia64_task_regs(t) (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1) +# define ia64_psr(regs) ((struct ia64_psr *) &(regs)->cr_ipsr) +# define user_mode(regs) (((struct ia64_psr *) &(regs)->cr_ipsr)->cpl != 0) +# define user_stack(task,regs) ((long) regs - (long) task == IA64_STK_OFFSET - sizeof(*regs)) +# define fsys_mode(task,regs) \ + ({ \ + struct task_struct *_task = (task); \ + struct pt_regs *_regs = (regs); \ + !user_mode(_regs) && user_stack(_task, _regs); \ + }) + + /* + * System call handlers that, upon successful completion, need to return a negative value + * should call force_successful_syscall_return() right before returning. On architectures + * where the syscall convention provides for a separate error flag (e.g., alpha, ia64, + * ppc{,64}, sparc{,64}, possibly others), this macro can be used to ensure that the error + * flag will not get set. On architectures which do not support a separate error flag, + * the macro is a no-op and the spurious error condition needs to be filtered out by some + * other means (e.g., in user-level, by passing an extra argument to the syscall handler, + * or something along those lines). + * + * On ia64, we can clear the user's pt_regs->r8 to force a successful syscall. + */ +# define force_successful_syscall_return() (ia64_task_regs(current)->r8 = 0) + + struct task_struct; /* forward decl */ + struct unw_frame_info; /* forward decl */ + + extern void show_regs (struct pt_regs *); + extern void ia64_do_show_stack (struct unw_frame_info *, void *); + extern unsigned long ia64_get_user_rbs_end (struct task_struct *, struct pt_regs *, + unsigned long *); + extern long ia64_peek (struct task_struct *, struct switch_stack *, unsigned long, + unsigned long, long *); + extern long ia64_poke (struct task_struct *, struct switch_stack *, unsigned long, + unsigned long, long); + extern void ia64_flush_fph (struct task_struct *); + extern void ia64_sync_fph (struct task_struct *); + extern long ia64_sync_user_rbs (struct task_struct *, struct switch_stack *, + unsigned long, unsigned long); + + /* get nat bits for scratch registers such that bit N==1 iff scratch register rN is a NaT */ + extern unsigned long ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat); + /* put nat bits for scratch registers such that scratch register rN is a NaT iff bit N==1 */ + extern unsigned long ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat); + + extern void ia64_increment_ip (struct pt_regs *pt); + extern void ia64_decrement_ip (struct pt_regs *pt); + +#endif /* !__KERNEL__ */ + +/* pt_all_user_regs is used for PTRACE_GETREGS PTRACE_SETREGS */ +struct pt_all_user_regs { + unsigned long nat; + unsigned long cr_iip; + unsigned long cfm; + unsigned long cr_ipsr; + unsigned long pr; + + unsigned long gr[32]; + unsigned long br[8]; + unsigned long ar[128]; + struct ia64_fpreg fr[128]; +}; + +#endif /* !__ASSEMBLY__ */ + +/* indices to application-registers array in pt_all_user_regs */ +#define PT_AUR_RSC 16 +#define PT_AUR_BSP 17 +#define PT_AUR_BSPSTORE 18 +#define PT_AUR_RNAT 19 +#define PT_AUR_CCV 32 +#define PT_AUR_UNAT 36 
+#define PT_AUR_FPSR 40 +#define PT_AUR_PFS 64 +#define PT_AUR_LC 65 +#define PT_AUR_EC 66 + +/* + * The numbers chosen here are somewhat arbitrary but absolutely MUST + * not overlap with any of the number assigned in <linux/ptrace.h>. + */ +#define PTRACE_SINGLEBLOCK 12 /* resume execution until next branch */ +#define PTRACE_OLD_GETSIGINFO 13 /* (replaced by PTRACE_GETSIGINFO in <linux/ptrace.h>) */ +#define PTRACE_OLD_SETSIGINFO 14 /* (replaced by PTRACE_SETSIGINFO in <linux/ptrace.h>) */ +#define PTRACE_GETREGS 18 /* get all registers (pt_all_user_regs) in one shot */ +#define PTRACE_SETREGS 19 /* set all registers (pt_all_user_regs) in one shot */ + +#define PTRACE_OLDSETOPTIONS 21 + +#endif /* _ASM_IA64_PTRACE_H */ diff --git a/include/asm-ia64/ptrace_offsets.h b/include/asm-ia64/ptrace_offsets.h new file mode 100644 index 000000000000..b712773c759e --- /dev/null +++ b/include/asm-ia64/ptrace_offsets.h @@ -0,0 +1,268 @@ +#ifndef _ASM_IA64_PTRACE_OFFSETS_H +#define _ASM_IA64_PTRACE_OFFSETS_H + +/* + * Copyright (C) 1999, 2003 Hewlett-Packard Co + * David Mosberger-Tang <davidm@hpl.hp.com> + */ +/* + * The "uarea" that can be accessed via PEEKUSER and POKEUSER is a + * virtual structure that would have the following definition: + * + * struct uarea { + * struct ia64_fpreg fph[96]; // f32-f127 + * unsigned long nat_bits; + * unsigned long empty1; + * struct ia64_fpreg f2; // f2-f5 + * : + * struct ia64_fpreg f5; + * struct ia64_fpreg f10; // f10-f31 + * : + * struct ia64_fpreg f31; + * unsigned long r4; // r4-r7 + * : + * unsigned long r7; + * unsigned long b1; // b1-b5 + * : + * unsigned long b5; + * unsigned long ar_ec; + * unsigned long ar_lc; + * unsigned long empty2[5]; + * unsigned long cr_ipsr; + * unsigned long cr_iip; + * unsigned long cfm; + * unsigned long ar_unat; + * unsigned long ar_pfs; + * unsigned long ar_rsc; + * unsigned long ar_rnat; + * unsigned long ar_bspstore; + * unsigned long pr; + * unsigned long b6; + * unsigned long ar_bsp; + * unsigned long r1; + * unsigned long r2; + * unsigned long r3; + * unsigned long r12; + * unsigned long r13; + * unsigned long r14; + * unsigned long r15; + * unsigned long r8; + * unsigned long r9; + * unsigned long r10; + * unsigned long r11; + * unsigned long r16; + * : + * unsigned long r31; + * unsigned long ar_ccv; + * unsigned long ar_fpsr; + * unsigned long b0; + * unsigned long b7; + * unsigned long f6; + * unsigned long f7; + * unsigned long f8; + * unsigned long f9; + * unsigned long ar_csd; + * unsigned long ar_ssd; + * unsigned long rsvd1[710]; + * unsigned long dbr[8]; + * unsigned long rsvd2[504]; + * unsigned long ibr[8]; + * unsigned long rsvd3[504]; + * unsigned long pmd[4]; + * } + */ + +/* fph: */ +#define PT_F32 0x0000 +#define PT_F33 0x0010 +#define PT_F34 0x0020 +#define PT_F35 0x0030 +#define PT_F36 0x0040 +#define PT_F37 0x0050 +#define PT_F38 0x0060 +#define PT_F39 0x0070 +#define PT_F40 0x0080 +#define PT_F41 0x0090 +#define PT_F42 0x00a0 +#define PT_F43 0x00b0 +#define PT_F44 0x00c0 +#define PT_F45 0x00d0 +#define PT_F46 0x00e0 +#define PT_F47 0x00f0 +#define PT_F48 0x0100 +#define PT_F49 0x0110 +#define PT_F50 0x0120 +#define PT_F51 0x0130 +#define PT_F52 0x0140 +#define PT_F53 0x0150 +#define PT_F54 0x0160 +#define PT_F55 0x0170 +#define PT_F56 0x0180 +#define PT_F57 0x0190 +#define PT_F58 0x01a0 +#define PT_F59 0x01b0 +#define PT_F60 0x01c0 +#define PT_F61 0x01d0 +#define PT_F62 0x01e0 +#define PT_F63 0x01f0 +#define PT_F64 0x0200 +#define PT_F65 0x0210 +#define PT_F66 0x0220 +#define PT_F67 0x0230 
+#define PT_F68 0x0240 +#define PT_F69 0x0250 +#define PT_F70 0x0260 +#define PT_F71 0x0270 +#define PT_F72 0x0280 +#define PT_F73 0x0290 +#define PT_F74 0x02a0 +#define PT_F75 0x02b0 +#define PT_F76 0x02c0 +#define PT_F77 0x02d0 +#define PT_F78 0x02e0 +#define PT_F79 0x02f0 +#define PT_F80 0x0300 +#define PT_F81 0x0310 +#define PT_F82 0x0320 +#define PT_F83 0x0330 +#define PT_F84 0x0340 +#define PT_F85 0x0350 +#define PT_F86 0x0360 +#define PT_F87 0x0370 +#define PT_F88 0x0380 +#define PT_F89 0x0390 +#define PT_F90 0x03a0 +#define PT_F91 0x03b0 +#define PT_F92 0x03c0 +#define PT_F93 0x03d0 +#define PT_F94 0x03e0 +#define PT_F95 0x03f0 +#define PT_F96 0x0400 +#define PT_F97 0x0410 +#define PT_F98 0x0420 +#define PT_F99 0x0430 +#define PT_F100 0x0440 +#define PT_F101 0x0450 +#define PT_F102 0x0460 +#define PT_F103 0x0470 +#define PT_F104 0x0480 +#define PT_F105 0x0490 +#define PT_F106 0x04a0 +#define PT_F107 0x04b0 +#define PT_F108 0x04c0 +#define PT_F109 0x04d0 +#define PT_F110 0x04e0 +#define PT_F111 0x04f0 +#define PT_F112 0x0500 +#define PT_F113 0x0510 +#define PT_F114 0x0520 +#define PT_F115 0x0530 +#define PT_F116 0x0540 +#define PT_F117 0x0550 +#define PT_F118 0x0560 +#define PT_F119 0x0570 +#define PT_F120 0x0580 +#define PT_F121 0x0590 +#define PT_F122 0x05a0 +#define PT_F123 0x05b0 +#define PT_F124 0x05c0 +#define PT_F125 0x05d0 +#define PT_F126 0x05e0 +#define PT_F127 0x05f0 + +#define PT_NAT_BITS 0x0600 + +#define PT_F2 0x0610 +#define PT_F3 0x0620 +#define PT_F4 0x0630 +#define PT_F5 0x0640 +#define PT_F10 0x0650 +#define PT_F11 0x0660 +#define PT_F12 0x0670 +#define PT_F13 0x0680 +#define PT_F14 0x0690 +#define PT_F15 0x06a0 +#define PT_F16 0x06b0 +#define PT_F17 0x06c0 +#define PT_F18 0x06d0 +#define PT_F19 0x06e0 +#define PT_F20 0x06f0 +#define PT_F21 0x0700 +#define PT_F22 0x0710 +#define PT_F23 0x0720 +#define PT_F24 0x0730 +#define PT_F25 0x0740 +#define PT_F26 0x0750 +#define PT_F27 0x0760 +#define PT_F28 0x0770 +#define PT_F29 0x0780 +#define PT_F30 0x0790 +#define PT_F31 0x07a0 +#define PT_R4 0x07b0 +#define PT_R5 0x07b8 +#define PT_R6 0x07c0 +#define PT_R7 0x07c8 + +#define PT_B1 0x07d8 +#define PT_B2 0x07e0 +#define PT_B3 0x07e8 +#define PT_B4 0x07f0 +#define PT_B5 0x07f8 + +#define PT_AR_EC 0x0800 +#define PT_AR_LC 0x0808 + +#define PT_CR_IPSR 0x0830 +#define PT_CR_IIP 0x0838 +#define PT_CFM 0x0840 +#define PT_AR_UNAT 0x0848 +#define PT_AR_PFS 0x0850 +#define PT_AR_RSC 0x0858 +#define PT_AR_RNAT 0x0860 +#define PT_AR_BSPSTORE 0x0868 +#define PT_PR 0x0870 +#define PT_B6 0x0878 +#define PT_AR_BSP 0x0880 /* note: this points to the *end* of the backing store! 
*/ +#define PT_R1 0x0888 +#define PT_R2 0x0890 +#define PT_R3 0x0898 +#define PT_R12 0x08a0 +#define PT_R13 0x08a8 +#define PT_R14 0x08b0 +#define PT_R15 0x08b8 +#define PT_R8 0x08c0 +#define PT_R9 0x08c8 +#define PT_R10 0x08d0 +#define PT_R11 0x08d8 +#define PT_R16 0x08e0 +#define PT_R17 0x08e8 +#define PT_R18 0x08f0 +#define PT_R19 0x08f8 +#define PT_R20 0x0900 +#define PT_R21 0x0908 +#define PT_R22 0x0910 +#define PT_R23 0x0918 +#define PT_R24 0x0920 +#define PT_R25 0x0928 +#define PT_R26 0x0930 +#define PT_R27 0x0938 +#define PT_R28 0x0940 +#define PT_R29 0x0948 +#define PT_R30 0x0950 +#define PT_R31 0x0958 +#define PT_AR_CCV 0x0960 +#define PT_AR_FPSR 0x0968 +#define PT_B0 0x0970 +#define PT_B7 0x0978 +#define PT_F6 0x0980 +#define PT_F7 0x0990 +#define PT_F8 0x09a0 +#define PT_F9 0x09b0 +#define PT_AR_CSD 0x09c0 +#define PT_AR_SSD 0x09c8 + +#define PT_DBR 0x2000 /* data breakpoint registers */ +#define PT_IBR 0x3000 /* instruction breakpoint registers */ +#define PT_PMD 0x4000 /* performance monitoring counters */ + +#endif /* _ASM_IA64_PTRACE_OFFSETS_H */ diff --git a/include/asm-ia64/resource.h b/include/asm-ia64/resource.h new file mode 100644 index 000000000000..77b1eee01f30 --- /dev/null +++ b/include/asm-ia64/resource.h @@ -0,0 +1,8 @@ +#ifndef _ASM_IA64_RESOURCE_H +#define _ASM_IA64_RESOURCE_H + +#include <asm/ustack.h> +#define _STK_LIM_MAX DEFAULT_USER_STACK_SIZE +#include <asm-generic/resource.h> + +#endif /* _ASM_IA64_RESOURCE_H */ diff --git a/include/asm-ia64/rse.h b/include/asm-ia64/rse.h new file mode 100644 index 000000000000..02830a3b0196 --- /dev/null +++ b/include/asm-ia64/rse.h @@ -0,0 +1,66 @@ +#ifndef _ASM_IA64_RSE_H +#define _ASM_IA64_RSE_H + +/* + * Copyright (C) 1998, 1999 Hewlett-Packard Co + * Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com> + * + * Register stack engine related helper functions. This file may be + * used in applications, so be careful about the name-space and give + * some consideration to non-GNU C compilers (though __inline__ is + * fine). + */ + +static __inline__ unsigned long +ia64_rse_slot_num (unsigned long *addr) +{ + return (((unsigned long) addr) >> 3) & 0x3f; +} + +/* + * Return TRUE if ADDR is the address of an RNAT slot. + */ +static __inline__ unsigned long +ia64_rse_is_rnat_slot (unsigned long *addr) +{ + return ia64_rse_slot_num(addr) == 0x3f; +} + +/* + * Returns the address of the RNAT slot that covers the slot at + * address SLOT_ADDR. + */ +static __inline__ unsigned long * +ia64_rse_rnat_addr (unsigned long *slot_addr) +{ + return (unsigned long *) ((unsigned long) slot_addr | (0x3f << 3)); +} + +/* + * Calculate the number of registers in the dirty partition starting at BSPSTORE and + * ending at BSP. This isn't simply (BSP-BSPSTORE)/8 because every 64th slot stores + * ar.rnat. + */ +static __inline__ unsigned long +ia64_rse_num_regs (unsigned long *bspstore, unsigned long *bsp) +{ + unsigned long slots = (bsp - bspstore); + + return slots - (ia64_rse_slot_num(bspstore) + slots)/0x40; +} + +/* + * The inverse of the above: given bspstore and the number of + * registers, calculate ar.bsp. 
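+ *
+ * Worked example (illustrative): if bspstore points at slot 0x3e,
+ * skipping 2 registers also steps over the RNAT slot at 0x3f, so
+ * ia64_rse_skip_regs() advances by 3 slots (24 bytes):
+ * delta = 0x3e + 2 = 0x40, and addr + 2 + 0x40/0x3f = addr + 3.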
+ */
+static __inline__ unsigned long *
+ia64_rse_skip_regs (unsigned long *addr, long num_regs)
+{
+	long delta = ia64_rse_slot_num(addr) + num_regs;
+
+	if (num_regs < 0)
+		delta -= 0x3e;
+	return addr + num_regs + delta/0x3f;
+}
+
+#endif /* _ASM_IA64_RSE_H */
diff --git a/include/asm-ia64/rwsem.h b/include/asm-ia64/rwsem.h
new file mode 100644
index 000000000000..6ece5061dc19
--- /dev/null
+++ b/include/asm-ia64/rwsem.h
@@ -0,0 +1,188 @@
+/*
+ * asm-ia64/rwsem.h: R/W semaphores for ia64
+ *
+ * Copyright (C) 2003 Ken Chen <kenneth.w.chen@intel.com>
+ * Copyright (C) 2003 Asit Mallick <asit.k.mallick@intel.com>
+ *
+ * Based on asm-i386/rwsem.h and other architecture implementations.
+ *
+ * The MSW of the count is the negated number of active writers and
+ * waiting lockers, and the LSW is the total number of active locks.
+ *
+ * The lock count is initialized to 0 (no active and no waiting lockers).
+ *
+ * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case
+ * of an uncontended lock. Readers increment by 1 and see a positive value
+ * when uncontended, negative if there are writers (and maybe readers)
+ * waiting (in which case it goes to sleep).
+ */
+
+#ifndef _ASM_IA64_RWSEM_H
+#define _ASM_IA64_RWSEM_H
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+#include <asm/intrinsics.h>
+
+/*
+ * the semaphore definition
+ */
+struct rw_semaphore {
+	signed int count;
+	spinlock_t wait_lock;
+	struct list_head wait_list;
+#if RWSEM_DEBUG
+	int debug;
+#endif
+};
+
+#define RWSEM_UNLOCKED_VALUE 0x00000000
+#define RWSEM_ACTIVE_BIAS 0x00000001
+#define RWSEM_ACTIVE_MASK 0x0000ffff
+#define RWSEM_WAITING_BIAS (-0x00010000)
+#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
+#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+
+/*
+ * initialization
+ */
+#if RWSEM_DEBUG
+#define __RWSEM_DEBUG_INIT , 0
+#else
+#define __RWSEM_DEBUG_INIT /* */
+#endif
+
+#define __RWSEM_INITIALIZER(name) \
+	{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
+	  LIST_HEAD_INIT((name).wait_list) \
+	  __RWSEM_DEBUG_INIT }
+
+#define DECLARE_RWSEM(name) \
+	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
+
+extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
+extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
+
+static inline void
+init_rwsem (struct rw_semaphore *sem)
+{
+	sem->count = RWSEM_UNLOCKED_VALUE;
+	spin_lock_init(&sem->wait_lock);
+	INIT_LIST_HEAD(&sem->wait_list);
+#if RWSEM_DEBUG
+	sem->debug = 0;
+#endif
+}
+
+/*
+ * lock for reading
+ */
+static inline void
+__down_read (struct rw_semaphore *sem)
+{
+	int result = ia64_fetchadd4_acq((unsigned int *)&sem->count, 1);
+
+	if (result < 0)
+		rwsem_down_read_failed(sem);
+}
+
+/*
+ * lock for writing
+ */
+static inline void
+__down_write (struct rw_semaphore *sem)
+{
+	int old, new;
+
+	do {
+		old = sem->count;
+		new = old + RWSEM_ACTIVE_WRITE_BIAS;
+	} while (cmpxchg_acq(&sem->count, old, new) != old);
+
+	if (old != 0)
+		rwsem_down_write_failed(sem);
+}
+
+/*
+ * unlock after reading
+ */
+static inline void
+__up_read (struct rw_semaphore *sem)
+{
+	int result = ia64_fetchadd4_rel((unsigned int *)&sem->count, -1);
+
+	if (result < 0 && (--result & RWSEM_ACTIVE_MASK) == 0)
+		rwsem_wake(sem);
+}
+
+/*
+ * unlock after writing
+ */
+static inline void
+__up_write (struct rw_semaphore *sem)
+{
+	int old, new;
+
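+	/*
+	 * Atomically drop the active-writer bias: a negative result means
+	 * waiters are queued, and if no lock remains active afterwards
+	 * one of them must be woken.
+	 */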
do { + old = sem->count; + new = old - RWSEM_ACTIVE_WRITE_BIAS; + } while (cmpxchg_rel(&sem->count, old, new) != old); + + if (new < 0 && (new & RWSEM_ACTIVE_MASK) == 0) + rwsem_wake(sem); +} + +/* + * trylock for reading -- returns 1 if successful, 0 if contention + */ +static inline int +__down_read_trylock (struct rw_semaphore *sem) +{ + int tmp; + while ((tmp = sem->count) >= 0) { + if (tmp == cmpxchg_acq(&sem->count, tmp, tmp+1)) { + return 1; + } + } + return 0; +} + +/* + * trylock for writing -- returns 1 if successful, 0 if contention + */ +static inline int +__down_write_trylock (struct rw_semaphore *sem) +{ + int tmp = cmpxchg_acq(&sem->count, RWSEM_UNLOCKED_VALUE, + RWSEM_ACTIVE_WRITE_BIAS); + return tmp == RWSEM_UNLOCKED_VALUE; +} + +/* + * downgrade write lock to read lock + */ +static inline void +__downgrade_write (struct rw_semaphore *sem) +{ + int old, new; + + do { + old = sem->count; + new = old - RWSEM_WAITING_BIAS; + } while (cmpxchg_rel(&sem->count, old, new) != old); + + if (old < 0) + rwsem_downgrade_wake(sem); +} + +/* + * Implement atomic add functionality. These used to be "inline" functions, but GCC v3.1 + * doesn't quite optimize this stuff right and ends up with bad calls to fetchandadd. + */ +#define rwsem_atomic_add(delta, sem) atomic_add(delta, (atomic_t *)(&(sem)->count)) +#define rwsem_atomic_update(delta, sem) atomic_add_return(delta, (atomic_t *)(&(sem)->count)) + +#endif /* _ASM_IA64_RWSEM_H */ diff --git a/include/asm-ia64/sal.h b/include/asm-ia64/sal.h new file mode 100644 index 000000000000..ea1ed377de4c --- /dev/null +++ b/include/asm-ia64/sal.h @@ -0,0 +1,840 @@ +#ifndef _ASM_IA64_SAL_H +#define _ASM_IA64_SAL_H + +/* + * System Abstraction Layer definitions. + * + * This is based on version 2.5 of the manual "IA-64 System + * Abstraction Layer". + * + * Copyright (C) 2001 Intel + * Copyright (C) 2002 Jenna Hall <jenna.s.hall@intel.com> + * Copyright (C) 2001 Fred Lewis <frederick.v.lewis@intel.com> + * Copyright (C) 1998, 1999, 2001, 2003 Hewlett-Packard Co + * David Mosberger-Tang <davidm@hpl.hp.com> + * Copyright (C) 1999 Srinivasa Prasad Thirumalachar <sprasad@sprasad.engr.sgi.com> + * + * 02/01/04 J. Hall Updated Error Record Structures to conform to July 2001 + * revision of the SAL spec. + * 01/01/03 fvlewis Updated Error Record Structures to conform with Nov. 2000 + * revision of the SAL spec. + * 99/09/29 davidm Updated for SAL 2.6. + * 00/03/29 cfleck Updated SAL Error Logging info for processor (SAL 2.6) + * (plus examples of platform error info structures from smariset @ Intel) + */ + +#define IA64_SAL_PLATFORM_FEATURE_BUS_LOCK_BIT 0 +#define IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT_BIT 1 +#define IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT_BIT 2 +#define IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT_BIT 3 + +#define IA64_SAL_PLATFORM_FEATURE_BUS_LOCK (1<<IA64_SAL_PLATFORM_FEATURE_BUS_LOCK_BIT) +#define IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT (1<<IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT_BIT) +#define IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT (1<<IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT_BIT) +#define IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT (1<<IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT_BIT) + +#ifndef __ASSEMBLY__ + +#include <linux/bcd.h> +#include <linux/spinlock.h> +#include <linux/efi.h> + +#include <asm/pal.h> +#include <asm/system.h> +#include <asm/fpu.h> + +extern spinlock_t sal_lock; + +/* SAL spec _requires_ eight args for each call. 
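A recap of the rwsem.h count arithmetic above, as a minimal host-side sketch (not part of the original header): the bias constants are copied from rwsem.h, and the printed values are what the fast paths observe on an uncontended lock.

    #include <stdio.h>

    #define RWSEM_ACTIVE_BIAS        0x00000001
    #define RWSEM_WAITING_BIAS       (-0x00010000)
    #define RWSEM_ACTIVE_WRITE_BIAS  (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

    int main(void)
    {
            int count = 0;                          /* RWSEM_UNLOCKED_VALUE */

            count += RWSEM_ACTIVE_WRITE_BIAS;       /* __down_write, uncontended */
            printf("writer held: 0x%08x\n", (unsigned) count);  /* 0xffff0001 */

            count -= RWSEM_ACTIVE_WRITE_BIAS;       /* __up_write: back to 0 */
            count += RWSEM_ACTIVE_BIAS;             /* __down_read, reader #1 */
            count += RWSEM_ACTIVE_BIAS;             /* __down_read, reader #2 */
            printf("two readers: 0x%08x\n", (unsigned) count);  /* 0x00000002 */
            return 0;
    }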
*/ +#define __SAL_CALL(result,a0,a1,a2,a3,a4,a5,a6,a7) \ + result = (*ia64_sal)(a0,a1,a2,a3,a4,a5,a6,a7) + +# define SAL_CALL(result,args...) do { \ + unsigned long __ia64_sc_flags; \ + struct ia64_fpreg __ia64_sc_fr[6]; \ + ia64_save_scratch_fpregs(__ia64_sc_fr); \ + spin_lock_irqsave(&sal_lock, __ia64_sc_flags); \ + __SAL_CALL(result, args); \ + spin_unlock_irqrestore(&sal_lock, __ia64_sc_flags); \ + ia64_load_scratch_fpregs(__ia64_sc_fr); \ +} while (0) + +# define SAL_CALL_NOLOCK(result,args...) do { \ + unsigned long __ia64_scn_flags; \ + struct ia64_fpreg __ia64_scn_fr[6]; \ + ia64_save_scratch_fpregs(__ia64_scn_fr); \ + local_irq_save(__ia64_scn_flags); \ + __SAL_CALL(result, args); \ + local_irq_restore(__ia64_scn_flags); \ + ia64_load_scratch_fpregs(__ia64_scn_fr); \ +} while (0) + +# define SAL_CALL_REENTRANT(result,args...) do { \ + struct ia64_fpreg __ia64_scs_fr[6]; \ + ia64_save_scratch_fpregs(__ia64_scs_fr); \ + preempt_disable(); \ + __SAL_CALL(result, args); \ + preempt_enable(); \ + ia64_load_scratch_fpregs(__ia64_scs_fr); \ +} while (0) + +#define SAL_SET_VECTORS 0x01000000 +#define SAL_GET_STATE_INFO 0x01000001 +#define SAL_GET_STATE_INFO_SIZE 0x01000002 +#define SAL_CLEAR_STATE_INFO 0x01000003 +#define SAL_MC_RENDEZ 0x01000004 +#define SAL_MC_SET_PARAMS 0x01000005 +#define SAL_REGISTER_PHYSICAL_ADDR 0x01000006 + +#define SAL_CACHE_FLUSH 0x01000008 +#define SAL_CACHE_INIT 0x01000009 +#define SAL_PCI_CONFIG_READ 0x01000010 +#define SAL_PCI_CONFIG_WRITE 0x01000011 +#define SAL_FREQ_BASE 0x01000012 + +#define SAL_UPDATE_PAL 0x01000020 + +struct ia64_sal_retval { + /* + * A zero status value indicates call completed without error. + * A negative status value indicates reason of call failure. + * A positive status value indicates success but an + * informational value should be printed (e.g., "reboot for + * change to take effect"). + */ + s64 status; + u64 v0; + u64 v1; + u64 v2; +}; + +typedef struct ia64_sal_retval (*ia64_sal_handler) (u64, ...); + +enum { + SAL_FREQ_BASE_PLATFORM = 0, + SAL_FREQ_BASE_INTERVAL_TIMER = 1, + SAL_FREQ_BASE_REALTIME_CLOCK = 2 +}; + +/* + * The SAL system table is followed by a variable number of variable + * length descriptors. The structure of these descriptors follows + * below. + * The definition follows SAL specs from July 2000 + */ +struct ia64_sal_systab { + u8 signature[4]; /* should be "SST_" */ + u32 size; /* size of this table in bytes */ + u8 sal_rev_minor; + u8 sal_rev_major; + u16 entry_count; /* # of entries in variable portion */ + u8 checksum; + u8 reserved1[7]; + u8 sal_a_rev_minor; + u8 sal_a_rev_major; + u8 sal_b_rev_minor; + u8 sal_b_rev_major; + /* oem_id & product_id: terminating NUL is missing if string is exactly 32 bytes long. */ + u8 oem_id[32]; + u8 product_id[32]; /* ASCII product id */ + u8 reserved2[8]; +}; + +enum sal_systab_entry_type { + SAL_DESC_ENTRY_POINT = 0, + SAL_DESC_MEMORY = 1, + SAL_DESC_PLATFORM_FEATURE = 2, + SAL_DESC_TR = 3, + SAL_DESC_PTC = 4, + SAL_DESC_AP_WAKEUP = 5 +}; + +/* + * Entry type: Size: + * 0 48 + * 1 32 + * 2 16 + * 3 32 + * 4 16 + * 5 16 + */ +#define SAL_DESC_SIZE(type) "\060\040\020\040\020\020"[(unsigned) type] + +typedef struct ia64_sal_desc_entry_point { + u8 type; + u8 reserved1[7]; + u64 pal_proc; + u64 sal_proc; + u64 gp; + u8 reserved2[16]; +} ia64_sal_desc_entry_point_t; + +typedef struct ia64_sal_desc_memory { + u8 type; + u8 used_by_sal; /* needs to be mapped for SAL?
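A stand-alone sketch (not part of the header) of the SAL_DESC_SIZE string trick above: the octal escapes \060, \040 and \020 are the byte values 48, 32 and 16, so indexing the string literal by the entry type yields the size column of the table.

    #include <stdio.h>

    #define SAL_DESC_SIZE(type) "\060\040\020\040\020\020"[(unsigned) type]

    int main(void)
    {
            unsigned type;

            /* SAL_DESC_ENTRY_POINT (0) .. SAL_DESC_AP_WAKEUP (5) */
            for (type = 0; type <= 5; type++)
                    printf("descriptor type %u: %d bytes\n",
                           type, SAL_DESC_SIZE(type));
            return 0;
    }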
*/ + u8 mem_attr; /* current memory attribute setting */ + u8 access_rights; /* access rights set up by SAL */ + u8 mem_attr_mask; /* mask of supported memory attributes */ + u8 reserved1; + u8 mem_type; /* memory type */ + u8 mem_usage; /* memory usage */ + u64 addr; /* physical address of memory */ + u32 length; /* length (multiple of 4KB pages) */ + u32 reserved2; + u8 oem_reserved[8]; +} ia64_sal_desc_memory_t; + +typedef struct ia64_sal_desc_platform_feature { + u8 type; + u8 feature_mask; + u8 reserved1[14]; +} ia64_sal_desc_platform_feature_t; + +typedef struct ia64_sal_desc_tr { + u8 type; + u8 tr_type; /* 0 == instruction, 1 == data */ + u8 regnum; /* translation register number */ + u8 reserved1[5]; + u64 addr; /* virtual address of area covered */ + u64 page_size; /* encoded page size */ + u8 reserved2[8]; +} ia64_sal_desc_tr_t; + +typedef struct ia64_sal_desc_ptc { + u8 type; + u8 reserved1[3]; + u32 num_domains; /* # of coherence domains */ + u64 domain_info; /* physical address of domain info table */ +} ia64_sal_desc_ptc_t; + +typedef struct ia64_sal_ptc_domain_info { + u64 proc_count; /* number of processors in domain */ + u64 proc_list; /* physical address of LID array */ +} ia64_sal_ptc_domain_info_t; + +typedef struct ia64_sal_ptc_domain_proc_entry { + u64 id : 8; /* id of processor */ + u64 eid : 8; /* eid of processor */ +} ia64_sal_ptc_domain_proc_entry_t; + + +#define IA64_SAL_AP_EXTERNAL_INT 0 + +typedef struct ia64_sal_desc_ap_wakeup { + u8 type; + u8 mechanism; /* 0 == external interrupt */ + u8 reserved1[6]; + u64 vector; /* interrupt vector in range 0x10-0xff */ +} ia64_sal_desc_ap_wakeup_t ; + +extern ia64_sal_handler ia64_sal; +extern struct ia64_sal_desc_ptc *ia64_ptc_domain_info; + +extern unsigned short sal_revision; /* supported SAL spec revision */ +extern unsigned short sal_version; /* SAL version; OEM dependent */ +#define SAL_VERSION_CODE(major, minor) ((BIN2BCD(major) << 8) | BIN2BCD(minor)) + +extern const char *ia64_sal_strerror (long status); +extern void ia64_sal_init (struct ia64_sal_systab *sal_systab); + +/* SAL information type encodings */ +enum { + SAL_INFO_TYPE_MCA = 0, /* Machine check abort information */ + SAL_INFO_TYPE_INIT = 1, /* Init information */ + SAL_INFO_TYPE_CMC = 2, /* Corrected machine check information */ + SAL_INFO_TYPE_CPE = 3 /* Corrected platform error information */ +}; + +/* Encodings for machine check parameter types */ +enum { + SAL_MC_PARAM_RENDEZ_INT = 1, /* Rendezvous interrupt */ + SAL_MC_PARAM_RENDEZ_WAKEUP = 2, /* Wakeup */ + SAL_MC_PARAM_CPE_INT = 3 /* Corrected Platform Error Int */ +}; + +/* Encodings for rendezvous mechanisms */ +enum { + SAL_MC_PARAM_MECHANISM_INT = 1, /* Use interrupt */ + SAL_MC_PARAM_MECHANISM_MEM = 2 /* Use memory synchronization variable*/ +}; + +/* Encodings for vectors which can be registered by the OS with SAL */ +enum { + SAL_VECTOR_OS_MCA = 0, + SAL_VECTOR_OS_INIT = 1, + SAL_VECTOR_OS_BOOT_RENDEZ = 2 +}; + +/* Encodings for mca_opt parameter sent to SAL_MC_SET_PARAMS */ +#define SAL_MC_PARAM_RZ_ALWAYS 0x1 +#define SAL_MC_PARAM_BINIT_ESCALATE 0x10 + +/* + * Definition of the SAL Error Log from the SAL spec + */ + +/* SAL Error Record Section GUID Definitions */ +#define SAL_PROC_DEV_ERR_SECT_GUID \ + EFI_GUID(0xe429faf1, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81) +#define SAL_PLAT_MEM_DEV_ERR_SECT_GUID \ + EFI_GUID(0xe429faf2, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81) +#define SAL_PLAT_SEL_DEV_ERR_SECT_GUID \ + EFI_GUID(0xe429faf3, 0x3cb7, 
0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81) +#define SAL_PLAT_PCI_BUS_ERR_SECT_GUID \ + EFI_GUID(0xe429faf4, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81) +#define SAL_PLAT_SMBIOS_DEV_ERR_SECT_GUID \ + EFI_GUID(0xe429faf5, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81) +#define SAL_PLAT_PCI_COMP_ERR_SECT_GUID \ + EFI_GUID(0xe429faf6, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81) +#define SAL_PLAT_SPECIFIC_ERR_SECT_GUID \ + EFI_GUID(0xe429faf7, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81) +#define SAL_PLAT_HOST_CTLR_ERR_SECT_GUID \ + EFI_GUID(0xe429faf8, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81) +#define SAL_PLAT_BUS_ERR_SECT_GUID \ + EFI_GUID(0xe429faf9, 0x3cb7, 0x11d4, 0xbc, 0xa7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81) + +#define MAX_CACHE_ERRORS 6 +#define MAX_TLB_ERRORS 6 +#define MAX_BUS_ERRORS 1 + +/* Definition of version according to SAL spec for logging purposes */ +typedef struct sal_log_revision { + u8 minor; /* BCD (0..99) */ + u8 major; /* BCD (0..99) */ +} sal_log_revision_t; + +/* Definition of timestamp according to SAL spec for logging purposes */ +typedef struct sal_log_timestamp { + u8 slh_second; /* Second (0..59) */ + u8 slh_minute; /* Minute (0..59) */ + u8 slh_hour; /* Hour (0..23) */ + u8 slh_reserved; + u8 slh_day; /* Day (1..31) */ + u8 slh_month; /* Month (1..12) */ + u8 slh_year; /* Year (00..99) */ + u8 slh_century; /* Century (19, 20, 21, ...) */ +} sal_log_timestamp_t; + +/* Definition of log record header structures */ +typedef struct sal_log_record_header { + u64 id; /* Unique monotonically increasing ID */ + sal_log_revision_t revision; /* Major and Minor revision of header */ + u16 severity; /* Error Severity */ + u32 len; /* Length of this error log in bytes */ + sal_log_timestamp_t timestamp; /* Timestamp */ + efi_guid_t platform_guid; /* Unique OEM Platform ID */ +} sal_log_record_header_t; + +#define sal_log_severity_recoverable 0 +#define sal_log_severity_fatal 1 +#define sal_log_severity_corrected 2 + +/* Definition of log section header structures */ +typedef struct sal_log_sec_header { + efi_guid_t guid; /* Unique Section ID */ + sal_log_revision_t revision; /* Major and Minor revision of Section */ + u16 reserved; + u32 len; /* Section length */ +} sal_log_section_hdr_t; + +typedef struct sal_log_mod_error_info { + struct { + u64 check_info : 1, + requestor_identifier : 1, + responder_identifier : 1, + target_identifier : 1, + precise_ip : 1, + reserved : 59; + } valid; + u64 check_info; + u64 requestor_identifier; + u64 responder_identifier; + u64 target_identifier; + u64 precise_ip; +} sal_log_mod_error_info_t; + +typedef struct sal_processor_static_info { + struct { + u64 minstate : 1, + br : 1, + cr : 1, + ar : 1, + rr : 1, + fr : 1, + reserved : 58; + } valid; + pal_min_state_area_t min_state_area; + u64 br[8]; + u64 cr[128]; + u64 ar[128]; + u64 rr[8]; + struct ia64_fpreg __attribute__ ((packed)) fr[128]; +} sal_processor_static_info_t; + +struct sal_cpuid_info { + u64 regs[5]; + u64 reserved; +}; + +typedef struct sal_log_processor_info { + sal_log_section_hdr_t header; + struct { + u64 proc_error_map : 1, + proc_state_param : 1, + proc_cr_lid : 1, + psi_static_struct : 1, + num_cache_check : 4, + num_tlb_check : 4, + num_bus_check : 4, + num_reg_file_check : 4, + num_ms_check : 4, + cpuid_info : 1, + reserved1 : 39; + } valid; + u64 proc_error_map; + u64 proc_state_parameter; + u64 proc_cr_lid; + /* + * The rest of this structure consists of 
variable-length arrays, which can't be + * expressed in C. + */ + sal_log_mod_error_info_t info[0]; + /* + * This is what the rest would look like if C supported variable-length arrays: + * + * sal_log_mod_error_info_t cache_check_info[.valid.num_cache_check]; + * sal_log_mod_error_info_t tlb_check_info[.valid.num_tlb_check]; + * sal_log_mod_error_info_t bus_check_info[.valid.num_bus_check]; + * sal_log_mod_error_info_t reg_file_check_info[.valid.num_reg_file_check]; + * sal_log_mod_error_info_t ms_check_info[.valid.num_ms_check]; + * struct sal_cpuid_info cpuid_info; + * sal_processor_static_info_t processor_static_info; + */ +} sal_log_processor_info_t; + +/* Given a sal_log_processor_info_t pointer, return a pointer to the processor_static_info: */ +#define SAL_LPI_PSI_INFO(l) \ +({ sal_log_processor_info_t *_l = (l); \ + ((sal_processor_static_info_t *) \ + ((char *) _l->info + ((_l->valid.num_cache_check + _l->valid.num_tlb_check \ + + _l->valid.num_bus_check + _l->valid.num_reg_file_check \ + + _l->valid.num_ms_check) * sizeof(sal_log_mod_error_info_t) \ + + sizeof(struct sal_cpuid_info)))); \ +}) + +/* platform error log structures */ + +typedef struct sal_log_mem_dev_err_info { + sal_log_section_hdr_t header; + struct { + u64 error_status : 1, + physical_addr : 1, + addr_mask : 1, + node : 1, + card : 1, + module : 1, + bank : 1, + device : 1, + row : 1, + column : 1, + bit_position : 1, + requestor_id : 1, + responder_id : 1, + target_id : 1, + bus_spec_data : 1, + oem_id : 1, + oem_data : 1, + reserved : 47; + } valid; + u64 error_status; + u64 physical_addr; + u64 addr_mask; + u16 node; + u16 card; + u16 module; + u16 bank; + u16 device; + u16 row; + u16 column; + u16 bit_position; + u64 requestor_id; + u64 responder_id; + u64 target_id; + u64 bus_spec_data; + u8 oem_id[16]; + u8 oem_data[1]; /* Variable length data */ +} sal_log_mem_dev_err_info_t; + +typedef struct sal_log_sel_dev_err_info { + sal_log_section_hdr_t header; + struct { + u64 record_id : 1, + record_type : 1, + generator_id : 1, + evm_rev : 1, + sensor_type : 1, + sensor_num : 1, + event_dir : 1, + event_data1 : 1, + event_data2 : 1, + event_data3 : 1, + reserved : 54; + } valid; + u16 record_id; + u8 record_type; + u8 timestamp[4]; + u16 generator_id; + u8 evm_rev; + u8 sensor_type; + u8 sensor_num; + u8 event_dir; + u8 event_data1; + u8 event_data2; + u8 event_data3; +} sal_log_sel_dev_err_info_t; + +typedef struct sal_log_pci_bus_err_info { + sal_log_section_hdr_t header; + struct { + u64 err_status : 1, + err_type : 1, + bus_id : 1, + bus_address : 1, + bus_data : 1, + bus_cmd : 1, + requestor_id : 1, + responder_id : 1, + target_id : 1, + oem_data : 1, + reserved : 54; + } valid; + u64 err_status; + u16 err_type; + u16 bus_id; + u32 reserved; + u64 bus_address; + u64 bus_data; + u64 bus_cmd; + u64 requestor_id; + u64 responder_id; + u64 target_id; + u8 oem_data[1]; /* Variable length data */ +} sal_log_pci_bus_err_info_t; + +typedef struct sal_log_smbios_dev_err_info { + sal_log_section_hdr_t header; + struct { + u64 event_type : 1, + length : 1, + time_stamp : 1, + data : 1, + reserved1 : 60; + } valid; + u8 event_type; + u8 length; + u8 time_stamp[6]; + u8 data[1]; /* data of variable length, length == slsmb_length */ +} sal_log_smbios_dev_err_info_t; + +typedef struct sal_log_pci_comp_err_info { + sal_log_section_hdr_t header; + struct { + u64 err_status : 1, + comp_info : 1, + num_mem_regs : 1, + num_io_regs : 1, + reg_data_pairs : 1, + oem_data : 1, + reserved : 58; + } valid; + u64 err_status; + struct { + 
u16 vendor_id; + u16 device_id; + u8 class_code[3]; + u8 func_num; + u8 dev_num; + u8 bus_num; + u8 seg_num; + u8 reserved[5]; + } comp_info; + u32 num_mem_regs; + u32 num_io_regs; + u64 reg_data_pairs[1]; + /* + * array of address/data register pairs is num_mem_regs + num_io_regs elements + * long. Each array element consists of a u64 address followed by a u64 data + * value. The oem_data array immediately follows the reg_data_pairs array + */ + u8 oem_data[1]; /* Variable length data */ +} sal_log_pci_comp_err_info_t; + +typedef struct sal_log_plat_specific_err_info { + sal_log_section_hdr_t header; + struct { + u64 err_status : 1, + guid : 1, + oem_data : 1, + reserved : 61; + } valid; + u64 err_status; + efi_guid_t guid; + u8 oem_data[1]; /* platform specific variable length data */ +} sal_log_plat_specific_err_info_t; + +typedef struct sal_log_host_ctlr_err_info { + sal_log_section_hdr_t header; + struct { + u64 err_status : 1, + requestor_id : 1, + responder_id : 1, + target_id : 1, + bus_spec_data : 1, + oem_data : 1, + reserved : 58; + } valid; + u64 err_status; + u64 requestor_id; + u64 responder_id; + u64 target_id; + u64 bus_spec_data; + u8 oem_data[1]; /* Variable length OEM data */ +} sal_log_host_ctlr_err_info_t; + +typedef struct sal_log_plat_bus_err_info { + sal_log_section_hdr_t header; + struct { + u64 err_status : 1, + requestor_id : 1, + responder_id : 1, + target_id : 1, + bus_spec_data : 1, + oem_data : 1, + reserved : 58; + } valid; + u64 err_status; + u64 requestor_id; + u64 responder_id; + u64 target_id; + u64 bus_spec_data; + u8 oem_data[1]; /* Variable length OEM data */ +} sal_log_plat_bus_err_info_t; + +/* Overall platform error section structure */ +typedef union sal_log_platform_err_info { + sal_log_mem_dev_err_info_t mem_dev_err; + sal_log_sel_dev_err_info_t sel_dev_err; + sal_log_pci_bus_err_info_t pci_bus_err; + sal_log_smbios_dev_err_info_t smbios_dev_err; + sal_log_pci_comp_err_info_t pci_comp_err; + sal_log_plat_specific_err_info_t plat_specific_err; + sal_log_host_ctlr_err_info_t host_ctlr_err; + sal_log_plat_bus_err_info_t plat_bus_err; +} sal_log_platform_err_info_t; + +/* SAL log over-all, multi-section error record structure (processor+platform) */ +typedef struct err_rec { + sal_log_record_header_t sal_elog_header; + sal_log_processor_info_t proc_err; + sal_log_platform_err_info_t plat_err; + u8 oem_data_pad[1024]; +} ia64_err_rec_t; + +/* + * Now define a couple of inline functions for improved type checking + * and convenience. + */ +static inline long +ia64_sal_freq_base (unsigned long which, unsigned long *ticks_per_second, + unsigned long *drift_info) +{ + struct ia64_sal_retval isrv; + + SAL_CALL(isrv, SAL_FREQ_BASE, which, 0, 0, 0, 0, 0, 0); + *ticks_per_second = isrv.v0; + *drift_info = isrv.v1; + return isrv.status; +} + +/* Flush all the processor and platform level instruction and/or data caches */ +static inline s64 +ia64_sal_cache_flush (u64 cache_type) +{ + struct ia64_sal_retval isrv; + SAL_CALL(isrv, SAL_CACHE_FLUSH, cache_type, 0, 0, 0, 0, 0, 0); + return isrv.status; +} + + +/* Initialize all the processor and platform level instruction and data caches */ +static inline s64 +ia64_sal_cache_init (void) +{ + struct ia64_sal_retval isrv; + SAL_CALL(isrv, SAL_CACHE_INIT, 0, 0, 0, 0, 0, 0, 0); + return isrv.status; +} + +/* + * Clear the processor and platform information logged by SAL with respect to the machine + * state at the time of MCA's, INITs, CMCs, or CPEs. 
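A host-side model (with stand-in structure sizes; not part of the header) of the pointer arithmetic the SAL_LPI_PSI_INFO macro above performs: the static processor info follows however many mod_error_info entries the `valid` counts announce, plus the cpuid info.

    #include <stdio.h>
    #include <stddef.h>

    /* Stand-ins for the real structures defined above: both are 48 bytes. */
    typedef struct { unsigned long long v[6]; } sal_log_mod_error_info_t;
    struct sal_cpuid_info { unsigned long long regs[5], reserved; };

    int main(void)
    {
            /* Hypothetical record: 2 cache checks, 1 TLB check, 1 bus check. */
            unsigned num_checks = 2 + 1 + 1 + 0 + 0;
            size_t offset = num_checks * sizeof(sal_log_mod_error_info_t)
                            + sizeof(struct sal_cpuid_info);

            printf("processor_static_info starts %zu bytes past info[]\n",
                   offset);
            return 0;
    }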
+ */ +static inline s64 +ia64_sal_clear_state_info (u64 sal_info_type) +{ + struct ia64_sal_retval isrv; + SAL_CALL_REENTRANT(isrv, SAL_CLEAR_STATE_INFO, sal_info_type, 0, + 0, 0, 0, 0, 0); + return isrv.status; +} + + +/* Get the processor and platform information logged by SAL with respect to the machine + * state at the time of the MCAs, INITs, CMCs, or CPEs. + */ +static inline u64 +ia64_sal_get_state_info (u64 sal_info_type, u64 *sal_info) +{ + struct ia64_sal_retval isrv; + SAL_CALL_REENTRANT(isrv, SAL_GET_STATE_INFO, sal_info_type, 0, + sal_info, 0, 0, 0, 0); + if (isrv.status) + return 0; + + return isrv.v0; +} + +/* + * Get the maximum size of the information logged by SAL with respect to the machine state + * at the time of MCAs, INITs, CMCs, or CPEs. + */ +static inline u64 +ia64_sal_get_state_info_size (u64 sal_info_type) +{ + struct ia64_sal_retval isrv; + SAL_CALL_REENTRANT(isrv, SAL_GET_STATE_INFO_SIZE, sal_info_type, 0, + 0, 0, 0, 0, 0); + if (isrv.status) + return 0; + return isrv.v0; +} + +/* + * Causes the processor to go into a spin loop within SAL where SAL awaits a wakeup from + * the monarch processor. Must not lock, because it will not return on any cpu until the + * monarch processor sends a wake up. + */ +static inline s64 +ia64_sal_mc_rendez (void) +{ + struct ia64_sal_retval isrv; + SAL_CALL_NOLOCK(isrv, SAL_MC_RENDEZ, 0, 0, 0, 0, 0, 0, 0); + return isrv.status; +} + +/* + * Allow the OS to specify the interrupt number to be used by SAL to interrupt OS during + * the machine check rendezvous sequence as well as the mechanism to wake up the + * non-monarch processor at the end of machine check processing. + * Returns the complete ia64_sal_retval because some calls return more than just a status + * value. + */ +static inline struct ia64_sal_retval +ia64_sal_mc_set_params (u64 param_type, u64 i_or_m, u64 i_or_m_val, u64 timeout, u64 rz_always) +{ + struct ia64_sal_retval isrv; + SAL_CALL(isrv, SAL_MC_SET_PARAMS, param_type, i_or_m, i_or_m_val, + timeout, rz_always, 0, 0); + return isrv; +} + +/* Read from PCI configuration space */ +static inline s64 +ia64_sal_pci_config_read (u64 pci_config_addr, int type, u64 size, u64 *value) +{ + struct ia64_sal_retval isrv; + SAL_CALL(isrv, SAL_PCI_CONFIG_READ, pci_config_addr, size, type, 0, 0, 0, 0); + if (value) + *value = isrv.v0; + return isrv.status; +} + +/* Write to PCI configuration space */ +static inline s64 +ia64_sal_pci_config_write (u64 pci_config_addr, int type, u64 size, u64 value) +{ + struct ia64_sal_retval isrv; + SAL_CALL(isrv, SAL_PCI_CONFIG_WRITE, pci_config_addr, size, value, + type, 0, 0, 0); + return isrv.status; +} + +/* + * Register physical addresses of locations needed by SAL when SAL procedures are invoked + * in virtual mode. + */ +static inline s64 +ia64_sal_register_physical_addr (u64 phys_entry, u64 phys_addr) +{ + struct ia64_sal_retval isrv; + SAL_CALL(isrv, SAL_REGISTER_PHYSICAL_ADDR, phys_entry, phys_addr, + 0, 0, 0, 0, 0); + return isrv.status; +} + +/* + * Register software dependent code locations within SAL. These locations are handlers or + * entry points where SAL will pass control for the specified event. These event handlers + * are for the boot rendezvous, MCAs and INIT scenarios. 
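A hypothetical consumer (kernel context on ia64 assumed; not part of the header, and the buffer decoding is elided) showing how the state-info calls above compose: query the record size, fetch the record, then clear it so SAL can log the next event.

    #include <linux/slab.h>
    #include <linux/errno.h>
    #include <asm/sal.h>

    static int dump_one_mca_record(void)
    {
            u64 size = ia64_sal_get_state_info_size(SAL_INFO_TYPE_MCA);
            u64 *buffer;

            if (!size)
                    return -ENODEV;
            buffer = kmalloc(size, GFP_KERNEL);
            if (!buffer)
                    return -ENOMEM;
            if (ia64_sal_get_state_info(SAL_INFO_TYPE_MCA, buffer)) {
                    /* ... decode the sal_log_record_header_t here ... */
                    ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
            }
            kfree(buffer);
            return 0;
    }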
+ */ +static inline s64 +ia64_sal_set_vectors (u64 vector_type, + u64 handler_addr1, u64 gp1, u64 handler_len1, + u64 handler_addr2, u64 gp2, u64 handler_len2) +{ + struct ia64_sal_retval isrv; + SAL_CALL(isrv, SAL_SET_VECTORS, vector_type, + handler_addr1, gp1, handler_len1, + handler_addr2, gp2, handler_len2); + + return isrv.status; +} + +/* Update the contents of PAL block in the non-volatile storage device */ +static inline s64 +ia64_sal_update_pal (u64 param_buf, u64 scratch_buf, u64 scratch_buf_size, + u64 *error_code, u64 *scratch_buf_size_needed) +{ + struct ia64_sal_retval isrv; + SAL_CALL(isrv, SAL_UPDATE_PAL, param_buf, scratch_buf, scratch_buf_size, + 0, 0, 0, 0); + if (error_code) + *error_code = isrv.v0; + if (scratch_buf_size_needed) + *scratch_buf_size_needed = isrv.v1; + return isrv.status; +} + +extern unsigned long sal_platform_features; + +extern int (*salinfo_platform_oemdata)(const u8 *, u8 **, u64 *); + +struct sal_ret_values { + long r8; long r9; long r10; long r11; +}; + +#define IA64_SAL_OEMFUNC_MIN 0x02000000 +#define IA64_SAL_OEMFUNC_MAX 0x03ffffff + +extern int ia64_sal_oemcall(struct ia64_sal_retval *, u64, u64, u64, u64, u64, + u64, u64, u64); +extern int ia64_sal_oemcall_nolock(struct ia64_sal_retval *, u64, u64, u64, + u64, u64, u64, u64, u64); +extern int ia64_sal_oemcall_reentrant(struct ia64_sal_retval *, u64, u64, u64, + u64, u64, u64, u64, u64); + +extern void ia64_sal_handler_init(void *entry_point, void *gpval); + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_IA64_SAL_H */ diff --git a/include/asm-ia64/scatterlist.h b/include/asm-ia64/scatterlist.h new file mode 100644 index 000000000000..834a189ef189 --- /dev/null +++ b/include/asm-ia64/scatterlist.h @@ -0,0 +1,28 @@ +#ifndef _ASM_IA64_SCATTERLIST_H +#define _ASM_IA64_SCATTERLIST_H + +/* + * Modified 1998-1999, 2001-2002, 2004 + * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co + */ + +struct scatterlist { + struct page *page; + unsigned int offset; + unsigned int length; /* buffer length */ + + dma_addr_t dma_address; + unsigned int dma_length; +}; + +/* + * It used to be that ISA_DMA_THRESHOLD had something to do with the + * DMA-limits of ISA-devices. Nowadays, its only remaining use (apart + * from the aha1542.c driver, which isn't 64-bit clean anyhow) is to + * tell the block-layer (via BLK_BOUNCE_ISA) what the max. physical + * address of a page is that is allocated with GFP_DMA. On IA-64, + * that's 4GB - 1. 
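A hypothetical kernel-context sketch (not part of the header) of filling one old-style struct scatterlist entry, as defined above, for a kernel-virtual buffer; the DMA fields are left for the mapping call.

    #include <linux/mm.h>
    #include <asm/scatterlist.h>

    static void sg_fill_one(struct scatterlist *sg, void *buf, unsigned int len)
    {
            sg->page   = virt_to_page(buf);
            sg->offset = offset_in_page(buf);
            sg->length = len;
            /* dma_address/dma_length are filled in later by dma_map_sg(). */
    }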
+ */ +#define ISA_DMA_THRESHOLD 0xffffffff + +#endif /* _ASM_IA64_SCATTERLIST_H */ diff --git a/include/asm-ia64/sections.h b/include/asm-ia64/sections.h new file mode 100644 index 000000000000..8e3dbde1b429 --- /dev/null +++ b/include/asm-ia64/sections.h @@ -0,0 +1,22 @@ +#ifndef _ASM_IA64_SECTIONS_H +#define _ASM_IA64_SECTIONS_H + +/* + * Copyright (C) 1998-2003 Hewlett-Packard Co + * David Mosberger-Tang <davidm@hpl.hp.com> + */ + +#include <asm-generic/sections.h> + +extern char __per_cpu_start[], __per_cpu_end[], __phys_per_cpu_start[]; +extern char __start___vtop_patchlist[], __end___vtop_patchlist[]; +extern char __start___mckinley_e9_bundles[], __end___mckinley_e9_bundles[]; +extern char __start_gate_section[]; +extern char __start_gate_mckinley_e9_patchlist[], __end_gate_mckinley_e9_patchlist[]; +extern char __start_gate_vtop_patchlist[], __end_gate_vtop_patchlist[]; +extern char __start_gate_fsyscall_patchlist[], __end_gate_fsyscall_patchlist[]; +extern char __start_gate_brl_fsys_bubble_down_patchlist[], __end_gate_brl_fsys_bubble_down_patchlist[]; +extern char __start_unwind[], __end_unwind[]; + +#endif /* _ASM_IA64_SECTIONS_H */ + diff --git a/include/asm-ia64/segment.h b/include/asm-ia64/segment.h new file mode 100644 index 000000000000..b89e2b3d648f --- /dev/null +++ b/include/asm-ia64/segment.h @@ -0,0 +1,6 @@ +#ifndef _ASM_IA64_SEGMENT_H +#define _ASM_IA64_SEGMENT_H + +/* Only here because we have some old header files that expect it.. */ + +#endif /* _ASM_IA64_SEGMENT_H */ diff --git a/include/asm-ia64/semaphore.h b/include/asm-ia64/semaphore.h new file mode 100644 index 000000000000..3a2f0f3f78f3 --- /dev/null +++ b/include/asm-ia64/semaphore.h @@ -0,0 +1,102 @@ +#ifndef _ASM_IA64_SEMAPHORE_H +#define _ASM_IA64_SEMAPHORE_H + +/* + * Copyright (C) 1998-2000 Hewlett-Packard Co + * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com> + */ + +#include <linux/wait.h> +#include <linux/rwsem.h> + +#include <asm/atomic.h> + +struct semaphore { + atomic_t count; + int sleepers; + wait_queue_head_t wait; +}; + +#define __SEMAPHORE_INITIALIZER(name, n) \ +{ \ + .count = ATOMIC_INIT(n), \ + .sleepers = 0, \ + .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ +} + +#define __MUTEX_INITIALIZER(name) __SEMAPHORE_INITIALIZER(name,1) + +#define __DECLARE_SEMAPHORE_GENERIC(name,count) \ + struct semaphore name = __SEMAPHORE_INITIALIZER(name, count) + +#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1) +#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name, 0) + +static inline void +sema_init (struct semaphore *sem, int val) +{ + *sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val); +} + +static inline void +init_MUTEX (struct semaphore *sem) +{ + sema_init(sem, 1); +} + +static inline void +init_MUTEX_LOCKED (struct semaphore *sem) +{ + sema_init(sem, 0); +} + +extern void __down (struct semaphore * sem); +extern int __down_interruptible (struct semaphore * sem); +extern int __down_trylock (struct semaphore * sem); +extern void __up (struct semaphore * sem); + +/* + * Atomically decrement the semaphore's count. If it goes negative, + * block the calling thread in the TASK_UNINTERRUPTIBLE state. + */ +static inline void +down (struct semaphore *sem) +{ + might_sleep(); + if (atomic_dec_return(&sem->count) < 0) + __down(sem); +} + +/* + * Atomically decrement the semaphore's count. If it goes negative, + * block the calling thread in the TASK_INTERRUPTIBLE state. 
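A user-space model (plain ints, no atomicity; not part of the header) of the semaphore fast paths above: a decrement that goes negative means the caller must sleep, and an increment that leaves the count at or below zero means a sleeper must be woken.

    #include <stdio.h>

    static int count = 1;   /* as initialized by DECLARE_MUTEX */

    static void down_model(void)
    {
            if (--count < 0)
                    printf("down: would call __down() and sleep\n");
    }

    static void up_model(void)
    {
            if (++count <= 0)
                    printf("up: would call __up() and wake a sleeper\n");
    }

    int main(void)
    {
            down_model();   /* 1 -> 0: acquired */
            down_model();   /* 0 -> -1: contended */
            up_model();     /* -1 -> 0: wakes the sleeper */
            up_model();     /* 0 -> 1: released */
            return 0;
    }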
+ */ +static inline int +down_interruptible (struct semaphore * sem) +{ + int ret = 0; + + might_sleep(); + if (atomic_dec_return(&sem->count) < 0) + ret = __down_interruptible(sem); + return ret; +} + +static inline int +down_trylock (struct semaphore *sem) +{ + int ret = 0; + + if (atomic_dec_return(&sem->count) < 0) + ret = __down_trylock(sem); + return ret; +} + +static inline void +up (struct semaphore * sem) +{ + if (atomic_inc_return(&sem->count) <= 0) + __up(sem); +} + +#endif /* _ASM_IA64_SEMAPHORE_H */ diff --git a/include/asm-ia64/sembuf.h b/include/asm-ia64/sembuf.h new file mode 100644 index 000000000000..1340fbc04d3e --- /dev/null +++ b/include/asm-ia64/sembuf.h @@ -0,0 +1,22 @@ +#ifndef _ASM_IA64_SEMBUF_H +#define _ASM_IA64_SEMBUF_H + +/* + * The semid64_ds structure for IA-64 architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. + * + * Pad space is left for: + * - 2 miscellaneous 64-bit values + */ + +struct semid64_ds { + struct ipc64_perm sem_perm; /* permissions .. see ipc.h */ + __kernel_time_t sem_otime; /* last semop time */ + __kernel_time_t sem_ctime; /* last change time */ + unsigned long sem_nsems; /* no. of semaphores in array */ + unsigned long __unused1; + unsigned long __unused2; +}; + +#endif /* _ASM_IA64_SEMBUF_H */ diff --git a/include/asm-ia64/serial.h b/include/asm-ia64/serial.h new file mode 100644 index 000000000000..0c7a2f3dcf13 --- /dev/null +++ b/include/asm-ia64/serial.h @@ -0,0 +1,19 @@ +/* + * include/asm-ia64/serial.h + * + * Derived from the i386 version. + */ + +/* + * This assumes you have a 1.8432 MHz clock for your UART. + * + * It'd be nice if someone built a serial card with a 24.576 MHz + * clock, since the 16550A is capable of handling a top speed of 1.5 + * megabits/second; but this requires the faster clock. + */ +#define BASE_BAUD ( 1843200 / 16 ) + +/* + * All legacy serial ports should be enumerated via ACPI namespace, so + * we need not list them here. + */ diff --git a/include/asm-ia64/setup.h b/include/asm-ia64/setup.h new file mode 100644 index 000000000000..ea29b57affcb --- /dev/null +++ b/include/asm-ia64/setup.h @@ -0,0 +1,6 @@ +#ifndef __IA64_SETUP_H +#define __IA64_SETUP_H + +#define COMMAND_LINE_SIZE 512 + +#endif diff --git a/include/asm-ia64/shmbuf.h b/include/asm-ia64/shmbuf.h new file mode 100644 index 000000000000..585002a77acd --- /dev/null +++ b/include/asm-ia64/shmbuf.h @@ -0,0 +1,38 @@ +#ifndef _ASM_IA64_SHMBUF_H +#define _ASM_IA64_SHMBUF_H + +/* + * The shmid64_ds structure for IA-64 architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. + * + * Pad space is left for: + * - 2 miscellaneous 64-bit values + */ + +struct shmid64_ds { + struct ipc64_perm shm_perm; /* operation perms */ + size_t shm_segsz; /* size of segment (bytes) */ + __kernel_time_t shm_atime; /* last attach time */ + __kernel_time_t shm_dtime; /* last detach time */ + __kernel_time_t shm_ctime; /* last change time */ + __kernel_pid_t shm_cpid; /* pid of creator */ + __kernel_pid_t shm_lpid; /* pid of last operator */ + unsigned long shm_nattch; /* no. 
of current attaches */ + unsigned long __unused1; + unsigned long __unused2; +}; + +struct shminfo64 { + unsigned long shmmax; + unsigned long shmmin; + unsigned long shmmni; + unsigned long shmseg; + unsigned long shmall; + unsigned long __unused1; + unsigned long __unused2; + unsigned long __unused3; + unsigned long __unused4; +}; + +#endif /* _ASM_IA64_SHMBUF_H */ diff --git a/include/asm-ia64/shmparam.h b/include/asm-ia64/shmparam.h new file mode 100644 index 000000000000..d07508dc54ae --- /dev/null +++ b/include/asm-ia64/shmparam.h @@ -0,0 +1,12 @@ +#ifndef _ASM_IA64_SHMPARAM_H +#define _ASM_IA64_SHMPARAM_H + +/* + * SHMLBA controls minimum alignment at which shared memory segments + * get attached. The IA-64 architecture says that there may be a + * performance degradation when there are virtual aliases within 1MB. + * To reduce the chance of this, we set SHMLBA to 1MB. --davidm 00/12/20 + */ +#define SHMLBA (1024*1024) + +#endif /* _ASM_IA64_SHMPARAM_H */ diff --git a/include/asm-ia64/sigcontext.h b/include/asm-ia64/sigcontext.h new file mode 100644 index 000000000000..57ff777bcc40 --- /dev/null +++ b/include/asm-ia64/sigcontext.h @@ -0,0 +1,70 @@ +#ifndef _ASM_IA64_SIGCONTEXT_H +#define _ASM_IA64_SIGCONTEXT_H + +/* + * Copyright (C) 1998, 1999, 2001 Hewlett-Packard Co + * Copyright (C) 1998, 1999, 2001 David Mosberger-Tang <davidm@hpl.hp.com> + */ + +#include <asm/fpu.h> + +#define IA64_SC_FLAG_ONSTACK_BIT 0 /* is handler running on signal stack? */ +#define IA64_SC_FLAG_IN_SYSCALL_BIT 1 /* did signal interrupt a syscall? */ +#define IA64_SC_FLAG_FPH_VALID_BIT 2 /* is state in f[32]-f[127] valid? */ + +#define IA64_SC_FLAG_ONSTACK (1 << IA64_SC_FLAG_ONSTACK_BIT) +#define IA64_SC_FLAG_IN_SYSCALL (1 << IA64_SC_FLAG_IN_SYSCALL_BIT) +#define IA64_SC_FLAG_FPH_VALID (1 << IA64_SC_FLAG_FPH_VALID_BIT) + +# ifndef __ASSEMBLY__ + +/* + * Note on handling of register backing store: sc_ar_bsp contains the address that would + * be found in ar.bsp after executing a "cover" instruction in the context in which the + * signal was raised. If signal delivery required switching to an alternate signal stack + * (sc_rbs_base is not NULL), the "dirty" partition (as it would exist after executing the + * imaginary "cover" instruction) is backed by the *alternate* signal stack, not the + * original one. In this case, sc_rbs_base contains the base address of the new register + * backing store. 
The number of registers in the dirty partition can be calculated as: + * + * ndirty = ia64_rse_num_regs(sc_rbs_base, sc_rbs_base + (sc_loadrs >> 16)) + * + */ + +struct sigcontext { + unsigned long sc_flags; /* see manifest constants above */ + unsigned long sc_nat; /* bit i == 1 iff scratch reg gr[i] is a NaT */ + stack_t sc_stack; /* previously active stack */ + + unsigned long sc_ip; /* instruction pointer */ + unsigned long sc_cfm; /* current frame marker */ + unsigned long sc_um; /* user mask bits */ + unsigned long sc_ar_rsc; /* register stack configuration register */ + unsigned long sc_ar_bsp; /* backing store pointer */ + unsigned long sc_ar_rnat; /* RSE NaT collection register */ + unsigned long sc_ar_ccv; /* compare and exchange compare value register */ + unsigned long sc_ar_unat; /* ar.unat of interrupted context */ + unsigned long sc_ar_fpsr; /* floating-point status register */ + unsigned long sc_ar_pfs; /* previous function state */ + unsigned long sc_ar_lc; /* loop count register */ + unsigned long sc_pr; /* predicate registers */ + unsigned long sc_br[8]; /* branch registers */ + /* Note: sc_gr[0] is used as the "uc_link" member of ucontext_t */ + unsigned long sc_gr[32]; /* general registers (static partition) */ + struct ia64_fpreg sc_fr[128]; /* floating-point registers */ + + unsigned long sc_rbs_base; /* NULL or new base of sighandler's rbs */ + unsigned long sc_loadrs; /* see description above */ + + unsigned long sc_ar25; /* cmp8xchg16 uses this */ + unsigned long sc_ar26; /* rsvd for scratch use */ + unsigned long sc_rsvd[12]; /* reserved for future use */ + /* + * The mask must come last so we can increase _NSIG_WORDS + * without breaking binary compatibility. + */ + sigset_t sc_mask; /* signal mask to restore after handler returns */ +}; + +# endif /* __ASSEMBLY__ */ +#endif /* _ASM_IA64_SIGCONTEXT_H */ diff --git a/include/asm-ia64/siginfo.h b/include/asm-ia64/siginfo.h new file mode 100644 index 000000000000..d55f139cbcdc --- /dev/null +++ b/include/asm-ia64/siginfo.h @@ -0,0 +1,141 @@ +#ifndef _ASM_IA64_SIGINFO_H +#define _ASM_IA64_SIGINFO_H + +/* + * Based on <asm-i386/siginfo.h>. + * + * Modified 1998-2002 + * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co + */ + +#define SI_PAD_SIZE ((SI_MAX_SIZE/sizeof(int)) - 4) + +#define SIGEV_PAD_SIZE ((SIGEV_MAX_SIZE/sizeof(int)) - 4) + +#define HAVE_ARCH_SIGINFO_T +#define HAVE_ARCH_COPY_SIGINFO +#define HAVE_ARCH_COPY_SIGINFO_TO_USER + +#include <asm-generic/siginfo.h> + +typedef struct siginfo { + int si_signo; + int si_errno; + int si_code; + int __pad0; + + union { + int _pad[SI_PAD_SIZE]; + + /* kill() */ + struct { + pid_t _pid; /* sender's pid */ + uid_t _uid; /* sender's uid */ + } _kill; + + /* POSIX.1b timers */ + struct { + timer_t _tid; /* timer id */ + int _overrun; /* overrun count */ + char _pad[sizeof(__ARCH_SI_UID_T) - sizeof(int)]; + sigval_t _sigval; /* must overlay ._rt._sigval! */ + int _sys_private; /* not to be passed to user */ + } _timer; + + /* POSIX.1b signals */ + struct { + pid_t _pid; /* sender's pid */ + uid_t _uid; /* sender's uid */ + sigval_t _sigval; + } _rt; + + /* SIGCHLD */ + struct { + pid_t _pid; /* which child */ + uid_t _uid; /* sender's uid */ + int _status; /* exit code */ + clock_t _utime; + clock_t _stime; + } _sigchld; + + /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */ + struct { + void __user *_addr; /* faulting insn/memory ref. 
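Referring back to the ndirty formula in the sigcontext comment above, here is a stand-alone model (not part of the header): rse_slot_num() matches ia64_rse_slot_num() from rse.h, and rse_num_regs() is assumed to follow the standard rse.h definition, discounting one NaT-collection slot per 64 backing-store slots.

    #include <stdio.h>

    static unsigned long rse_slot_num(unsigned long *addr)
    {
            return (((unsigned long) addr) >> 3) & 0x3f;
    }

    static unsigned long rse_num_regs(unsigned long *bspstore, unsigned long *bsp)
    {
            unsigned long slots = bsp - bspstore;

            return slots - (rse_slot_num(bspstore) + slots) / 0x40;
    }

    int main(void)
    {
            unsigned long *rbs = (unsigned long *) 0x1000;  /* slot 0 of a group */
            unsigned long *bsp = rbs + 128;                 /* 128 dirty slots */

            /* 128 slots span two NaT collection slots -> 126 registers. */
            printf("ndirty = %lu registers\n", rse_num_regs(rbs, bsp));
            return 0;
    }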
*/ + int _imm; /* immediate value for "break" */ + unsigned int _flags; /* see below */ + unsigned long _isr; /* isr */ + } _sigfault; + + /* SIGPOLL */ + struct { + long _band; /* POLL_IN, POLL_OUT, POLL_MSG (XPG requires a "long") */ + int _fd; + } _sigpoll; + } _sifields; +} siginfo_t; + +#define si_imm _sifields._sigfault._imm /* as per UNIX SysV ABI spec */ +#define si_flags _sifields._sigfault._flags +/* + * si_isr is valid for SIGILL, SIGFPE, SIGSEGV, SIGBUS, and SIGTRAP provided that + * si_code is non-zero and __ISR_VALID is set in si_flags. + */ +#define si_isr _sifields._sigfault._isr + +/* + * Flag values for si_flags: + */ +#define __ISR_VALID_BIT 0 +#define __ISR_VALID (1 << __ISR_VALID_BIT) + +/* + * SIGILL si_codes + */ +#define ILL_BADIADDR (__SI_FAULT|9) /* unimplemented instruction address */ +#define __ILL_BREAK (__SI_FAULT|10) /* illegal break */ +#define __ILL_BNDMOD (__SI_FAULT|11) /* bundle-update (modification) in progress */ +#undef NSIGILL +#define NSIGILL 11 + +/* + * SIGFPE si_codes + */ +#define __FPE_DECOVF (__SI_FAULT|9) /* decimal overflow */ +#define __FPE_DECDIV (__SI_FAULT|10) /* decimal division by zero */ +#define __FPE_DECERR (__SI_FAULT|11) /* packed decimal error */ +#define __FPE_INVASC (__SI_FAULT|12) /* invalid ASCII digit */ +#define __FPE_INVDEC (__SI_FAULT|13) /* invalid decimal digit */ +#undef NSIGFPE +#define NSIGFPE 13 + +/* + * SIGSEGV si_codes + */ +#define __SEGV_PSTKOVF (__SI_FAULT|3) /* paragraph stack overflow */ +#undef NSIGSEGV +#define NSIGSEGV 3 + +/* + * SIGTRAP si_codes + */ +#define TRAP_BRANCH (__SI_FAULT|3) /* process taken branch trap */ +#define TRAP_HWBKPT (__SI_FAULT|4) /* hardware breakpoint or watchpoint */ +#undef NSIGTRAP +#define NSIGTRAP 4 + +#ifdef __KERNEL__ +#include <linux/string.h> + +static inline void +copy_siginfo (siginfo_t *to, siginfo_t *from) +{ + if (from->si_code < 0) + memcpy(to, from, sizeof(siginfo_t)); + else + /* _sigchld is currently the largest known union member */ + memcpy(to, from, 4*sizeof(int) + sizeof(from->_sifields._sigchld)); +} + +#endif /* __KERNEL__ */ + +#endif /* _ASM_IA64_SIGINFO_H */ diff --git a/include/asm-ia64/signal.h b/include/asm-ia64/signal.h new file mode 100644 index 000000000000..660a759744dd --- /dev/null +++ b/include/asm-ia64/signal.h @@ -0,0 +1,185 @@ +#ifndef _ASM_IA64_SIGNAL_H +#define _ASM_IA64_SIGNAL_H + +/* + * Modified 1998-2001, 2003 + * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co + * + * Unfortunately, this file is being included by bits/signal.h in + * glibc-2.x. Hence the #ifdef __KERNEL__ ugliness. + */ + +#define SIGHUP 1 +#define SIGINT 2 +#define SIGQUIT 3 +#define SIGILL 4 +#define SIGTRAP 5 +#define SIGABRT 6 +#define SIGIOT 6 +#define SIGBUS 7 +#define SIGFPE 8 +#define SIGKILL 9 +#define SIGUSR1 10 +#define SIGSEGV 11 +#define SIGUSR2 12 +#define SIGPIPE 13 +#define SIGALRM 14 +#define SIGTERM 15 +#define SIGSTKFLT 16 +#define SIGCHLD 17 +#define SIGCONT 18 +#define SIGSTOP 19 +#define SIGTSTP 20 +#define SIGTTIN 21 +#define SIGTTOU 22 +#define SIGURG 23 +#define SIGXCPU 24 +#define SIGXFSZ 25 +#define SIGVTALRM 26 +#define SIGPROF 27 +#define SIGWINCH 28 +#define SIGIO 29 +#define SIGPOLL SIGIO +/* +#define SIGLOST 29 +*/ +#define SIGPWR 30 +#define SIGSYS 31 +/* signal 31 is no longer "unused", but the SIGUNUSED macro remains for backwards compatibility */ +#define SIGUNUSED 31 + +/* These should not be considered constants from userland. 
*/ +#define SIGRTMIN 32 +#define SIGRTMAX _NSIG + +/* + * SA_FLAGS values: + * + * SA_ONSTACK indicates that a registered stack_t will be used. + * SA_INTERRUPT is a no-op, but left due to historical reasons. + * SA_RESTART flag to get restarting signals (which were the default long ago) + * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. + * SA_RESETHAND clears the handler when the signal is delivered. + * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. + * SA_NODEFER prevents the current signal from being masked in the handler. + * + * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single + * Unix names RESETHAND and NODEFER respectively. + */ +#define SA_NOCLDSTOP 0x00000001 +#define SA_NOCLDWAIT 0x00000002 +#define SA_SIGINFO 0x00000004 +#define SA_ONSTACK 0x08000000 +#define SA_RESTART 0x10000000 +#define SA_NODEFER 0x40000000 +#define SA_RESETHAND 0x80000000 + +#define SA_NOMASK SA_NODEFER +#define SA_ONESHOT SA_RESETHAND +#define SA_INTERRUPT 0x20000000 /* dummy -- ignored */ + +#define SA_RESTORER 0x04000000 + +/* + * sigaltstack controls + */ +#define SS_ONSTACK 1 +#define SS_DISABLE 2 + +/* + * The minimum stack size needs to be fairly large because we want to + * be sure that an app compiled for today's CPUs will continue to run + * on all future CPU models. The CPU model matters because the signal + * frame needs to have space for the complete machine state, including + * all physical stacked registers. The number of physical stacked + * registers is CPU model dependent, but given that the width of + * ar.rsc.loadrs is 14 bits, we can assume that they'll never take up + * more than 16KB of space. + */ +#if 1 + /* + * This is a stupid typo: the value was _meant_ to be 131072 (0x20000), but I typed it + * in wrong. ;-( To preserve backwards compatibility, we leave the kernel at the + * incorrect value and fix libc only. + */ +# define MINSIGSTKSZ 131027 /* min. stack size for sigaltstack() */ +#else +# define MINSIGSTKSZ 131072 /* min. stack size for sigaltstack() */ +#endif +#define SIGSTKSZ 262144 /* default stack size for sigaltstack() */ + +#ifdef __KERNEL__ + +#define _NSIG 64 +#define _NSIG_BPW 64 +#define _NSIG_WORDS (_NSIG / _NSIG_BPW) + +/* + * These values of sa_flags are used only by the kernel as part of the + * irq handling routines. + * + * SA_INTERRUPT is also used by the irq handling routines. + * SA_SHIRQ is for shared interrupt support on PCI and EISA. + */ +#define SA_PROBE SA_ONESHOT +#define SA_SAMPLE_RANDOM SA_RESTART +#define SA_SHIRQ 0x04000000 +#define SA_PERCPU_IRQ 0x02000000 + +#endif /* __KERNEL__ */ + +#define SIG_BLOCK 0 /* for blocking signals */ +#define SIG_UNBLOCK 1 /* for unblocking signals */ +#define SIG_SETMASK 2 /* for setting the signal mask */ + +#define SIG_DFL ((__sighandler_t)0) /* default signal handling */ +#define SIG_IGN ((__sighandler_t)1) /* ignore signal */ +#define SIG_ERR ((__sighandler_t)-1) /* error return from signal */ + +# ifndef __ASSEMBLY__ + +# include <linux/types.h> + +/* Avoid too many header ordering problems. */ +struct siginfo; + +/* Type of a signal handler. */ +typedef void __user (*__sighandler_t)(int); + +typedef struct sigaltstack { + void __user *ss_sp; + int ss_flags; + size_t ss_size; +} stack_t; + +#ifdef __KERNEL__ + +/* Most things should be clean enough to redefine this at will, if care + is taken to make libc match. 
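A hypothetical user-space sketch (not part of the header) that allocates an alternate signal stack honoring the limits discussed above; SIGSTKSZ comfortably exceeds MINSIGSTKSZ, so the sizing quirk is irrelevant here.

    #include <signal.h>
    #include <stdlib.h>

    int install_altstack(void)
    {
            stack_t ss;

            ss.ss_sp = malloc(SIGSTKSZ);
            if (ss.ss_sp == NULL)
                    return -1;
            ss.ss_size = SIGSTKSZ;
            ss.ss_flags = 0;
            /* Handlers installed with SA_ONSTACK will now run on this stack. */
            return sigaltstack(&ss, NULL);
    }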
*/ + +typedef unsigned long old_sigset_t; + +typedef struct { + unsigned long sig[_NSIG_WORDS]; +} sigset_t; + +struct sigaction { + __sighandler_t sa_handler; + unsigned long sa_flags; + sigset_t sa_mask; /* mask last for extensibility */ +}; + +struct k_sigaction { + struct sigaction sa; +}; + +# include <asm/sigcontext.h> + +#define ptrace_signal_deliver(regs, cookie) do { } while (0) + +void set_sigdelayed(pid_t pid, int signo, int code, void __user *addr); + +#endif /* __KERNEL__ */ + +# endif /* !__ASSEMBLY__ */ +#endif /* _ASM_IA64_SIGNAL_H */ diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h new file mode 100644 index 000000000000..c4a227acfeb0 --- /dev/null +++ b/include/asm-ia64/smp.h @@ -0,0 +1,134 @@ +/* + * SMP Support + * + * Copyright (C) 1999 VA Linux Systems + * Copyright (C) 1999 Walt Drummond <drummond@valinux.com> + * (c) Copyright 2001-2003, 2005 Hewlett-Packard Development Company, L.P. + * David Mosberger-Tang <davidm@hpl.hp.com> + * Bjorn Helgaas <bjorn.helgaas@hp.com> + */ +#ifndef _ASM_IA64_SMP_H +#define _ASM_IA64_SMP_H + +#include <linux/config.h> +#include <linux/init.h> +#include <linux/threads.h> +#include <linux/kernel.h> +#include <linux/cpumask.h> + +#include <asm/bitops.h> +#include <asm/io.h> +#include <asm/param.h> +#include <asm/processor.h> +#include <asm/ptrace.h> + +static inline unsigned int +ia64_get_lid (void) +{ + union { + struct { + unsigned long reserved : 16; + unsigned long eid : 8; + unsigned long id : 8; + unsigned long ignored : 32; + } f; + unsigned long bits; + } lid; + + lid.bits = ia64_getreg(_IA64_REG_CR_LID); + return lid.f.id << 8 | lid.f.eid; +} + +#ifdef CONFIG_SMP + +#define XTP_OFFSET 0x1e0008 + +#define SMP_IRQ_REDIRECTION (1 << 0) +#define SMP_IPI_REDIRECTION (1 << 1) + +#define smp_processor_id() (current_thread_info()->cpu) + +extern struct smp_boot_data { + int cpu_count; + int cpu_phys_id[NR_CPUS]; +} smp_boot_data __initdata; + +extern char no_int_routing __devinitdata; + +extern cpumask_t cpu_online_map; +extern void __iomem *ipi_base_addr; +extern unsigned char smp_int_redirect; + +extern volatile int ia64_cpu_to_sapicid[]; +#define cpu_physical_id(i) ia64_cpu_to_sapicid[i] + +extern unsigned long ap_wakeup_vector; + +/* + * Function to map hard smp processor id to logical id. Slow, so don't use this in + * performance-critical code. + */ +static inline int +cpu_logical_id (int cpuid) +{ + int i; + + for (i = 0; i < NR_CPUS; ++i) + if (cpu_physical_id(i) == cpuid) + break; + return i; +} + +/* + * XTP control functions: + * min_xtp : route all interrupts to this CPU + * normal_xtp: nominal XTP value + * max_xtp : never deliver interrupts to this CPU. 
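A host-side sketch (not part of the header) of the cr.lid unpacking done by ia64_get_lid() above, applied to a hypothetical register value; the bitfield layout is implementation-defined and assumed to be the little-endian GCC layout the header relies on.

    #include <stdio.h>

    union lid {
            struct {
                    unsigned long reserved : 16;
                    unsigned long eid      : 8;
                    unsigned long id       : 8;
                    unsigned long ignored  : 32;
            } f;
            unsigned long bits;
    };

    int main(void)
    {
            union lid lid = { .bits = 0x03020000UL };       /* hypothetical: id 3, eid 2 */

            printf("hard cpu id = 0x%04x\n",
                   (unsigned) (lid.f.id << 8 | lid.f.eid)); /* prints 0x0302 */
            return 0;
    }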
+ */ + +static inline void +min_xtp (void) +{ + if (smp_int_redirect & SMP_IRQ_REDIRECTION) + writeb(0x00, ipi_base_addr + XTP_OFFSET); /* XTP to min */ +} + +static inline void +normal_xtp (void) +{ + if (smp_int_redirect & SMP_IRQ_REDIRECTION) + writeb(0x08, ipi_base_addr + XTP_OFFSET); /* XTP normal */ +} + +static inline void +max_xtp (void) +{ + if (smp_int_redirect & SMP_IRQ_REDIRECTION) + writeb(0x0f, ipi_base_addr + XTP_OFFSET); /* Set XTP to max */ +} + +#define hard_smp_processor_id() ia64_get_lid() + +/* Upping and downing of CPUs */ +extern int __cpu_disable (void); +extern void __cpu_die (unsigned int cpu); +extern void cpu_die (void) __attribute__ ((noreturn)); +extern int __cpu_up (unsigned int cpu); +extern void __init smp_build_cpu_map(void); + +extern void __init init_smp_config (void); +extern void smp_do_timer (struct pt_regs *regs); + +extern int smp_call_function_single (int cpuid, void (*func) (void *info), void *info, + int retry, int wait); +extern void smp_send_reschedule (int cpu); +extern void lock_ipi_calllock(void); +extern void unlock_ipi_calllock(void); + +#else + +#define cpu_logical_id(i) 0 +#define cpu_physical_id(i) ia64_get_lid() + +#endif /* CONFIG_SMP */ +#endif /* _ASM_IA64_SMP_H */ diff --git a/include/asm-ia64/sn/addrs.h b/include/asm-ia64/sn/addrs.h new file mode 100644 index 000000000000..c916bd22767a --- /dev/null +++ b/include/asm-ia64/sn/addrs.h @@ -0,0 +1,238 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 1992-1999,2001-2004 Silicon Graphics, Inc. All rights reserved. + */ + +#ifndef _ASM_IA64_SN_ADDRS_H +#define _ASM_IA64_SN_ADDRS_H + +#include <asm/percpu.h> +#include <asm/sn/types.h> +#include <asm/sn/arch.h> +#include <asm/sn/pda.h> + +/* + * Memory/SHUB Address Format: + * +-+---------+--+--------------+ + * |0| NASID |AS| NodeOffset | + * +-+---------+--+--------------+ + * + * NASID: (low NASID bit is 0) Memory and SHUB MMRs + * AS: 2-bit Address Space Identifier. Used only if low NASID bit is 0 + * 00: Local Resources and MMR space + * Top bit of NodeOffset + * 0: Local resources space + * node id: + * 0: IA64/NT compatibility space + * 2: Local MMR Space + * 4: Local memory, regardless of local node id + * 1: Global MMR space + * 01: GET space. + * 10: AMO space. + * 11: Cacheable memory space. + * + * NodeOffset: byte offset + * + * + * TIO address format: + * +-+----------+--+--------------+ + * |0| NASID |AS| Nodeoffset | + * +-+----------+--+--------------+ + * + * NASID: (low NASID bit is 1) TIO + * AS: 2-bit Chiplet Identifier + * 00: TIO LB (Indicates TIO MMR access.) + * 01: TIO ICE (indicates coretalk space access.) + * + * NodeOffset: top bit must be set. + * + * + * Note that in both of the above address formats, the low + * NASID bit indicates if the reference is to the SHUB or TIO MMRs. + */ + + +/* + * Define basic shift & mask constants for manipulating NASIDs and AS values. + */ +#define NASID_BITMASK (sn_hub_info->nasid_bitmask) +#define NASID_SHIFT (sn_hub_info->nasid_shift) +#define AS_SHIFT (sn_hub_info->as_shift) +#define AS_BITMASK 0x3UL + +#define NASID_MASK ((u64)NASID_BITMASK << NASID_SHIFT) +#define AS_MASK ((u64)AS_BITMASK << AS_SHIFT) +#define REGION_BITS 0xe000000000000000UL + + +/* + * AS values. These are the same on both SHUB1 & SHUB2. 
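A host-side model (not part of the header; LP64 host assumed) of the address composition described above. The shift values are hypothetical SHUB-style placeholders; on real hardware they come from sn_hub_info at runtime.

    #include <stdio.h>

    #define NASID_SHIFT     38              /* hypothetical placeholder */
    #define AS_SHIFT        36              /* hypothetical placeholder */
    #define CACHED          0xe000000000000000UL
    #define AS_CAC_VAL      3UL

    int main(void)
    {
            unsigned long nasid = 1, offset = 0x1000;
            unsigned long addr = CACHED | (AS_CAC_VAL << AS_SHIFT)
                                 | (nasid << NASID_SHIFT) | offset;

            printf("cacheable address, node %lu offset 0x%lx: 0x%016lx\n",
                   nasid, offset, addr);
            return 0;
    }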
+ */ +#define AS_GET_VAL 1UL +#define AS_AMO_VAL 2UL +#define AS_CAC_VAL 3UL +#define AS_GET_SPACE (AS_GET_VAL << AS_SHIFT) +#define AS_AMO_SPACE (AS_AMO_VAL << AS_SHIFT) +#define AS_CAC_SPACE (AS_CAC_VAL << AS_SHIFT) + + +/* + * Base addresses for various address ranges. + */ +#define CACHED 0xe000000000000000UL +#define UNCACHED 0xc000000000000000UL +#define UNCACHED_PHYS 0x8000000000000000UL + + +/* + * Virtual Mode Local & Global MMR space. + */ +#define SH1_LOCAL_MMR_OFFSET 0x8000000000UL +#define SH2_LOCAL_MMR_OFFSET 0x0200000000UL +#define LOCAL_MMR_OFFSET (is_shub2() ? SH2_LOCAL_MMR_OFFSET : SH1_LOCAL_MMR_OFFSET) +#define LOCAL_MMR_SPACE (UNCACHED | LOCAL_MMR_OFFSET) +#define LOCAL_PHYS_MMR_SPACE (UNCACHED_PHYS | LOCAL_MMR_OFFSET) + +#define SH1_GLOBAL_MMR_OFFSET 0x0800000000UL +#define SH2_GLOBAL_MMR_OFFSET 0x0300000000UL +#define GLOBAL_MMR_OFFSET (is_shub2() ? SH2_GLOBAL_MMR_OFFSET : SH1_GLOBAL_MMR_OFFSET) +#define GLOBAL_MMR_SPACE (UNCACHED | GLOBAL_MMR_OFFSET) + +/* + * Physical mode addresses + */ +#define GLOBAL_PHYS_MMR_SPACE (UNCACHED_PHYS | GLOBAL_MMR_OFFSET) + + +/* + * Clear region & AS bits. + */ +#define TO_PHYS_MASK (~(REGION_BITS | AS_MASK)) + + +/* + * Misc NASID manipulation. + */ +#define NASID_SPACE(n) ((u64)(n) << NASID_SHIFT) +#define REMOTE_ADDR(n,a) (NASID_SPACE(n) | (a)) +#define NODE_OFFSET(x) ((x) & (NODE_ADDRSPACE_SIZE - 1)) +#define NODE_ADDRSPACE_SIZE (1UL << AS_SHIFT) +#define NASID_GET(x) (int) (((u64) (x) >> NASID_SHIFT) & NASID_BITMASK) +#define LOCAL_MMR_ADDR(a) (LOCAL_MMR_SPACE | (a)) +#define GLOBAL_MMR_ADDR(n,a) (GLOBAL_MMR_SPACE | REMOTE_ADDR(n,a)) +#define GLOBAL_MMR_PHYS_ADDR(n,a) (GLOBAL_PHYS_MMR_SPACE | REMOTE_ADDR(n,a)) +#define GLOBAL_CAC_ADDR(n,a) (CAC_BASE | REMOTE_ADDR(n,a)) +#define CHANGE_NASID(n,x) ((void *)(((u64)(x) & ~NASID_MASK) | NASID_SPACE(n))) + + +/* non-II mmr's start at top of big window space (4G) */ +#define BWIN_TOP 0x0000000100000000UL + +/* + * general address defines + */ +#define CAC_BASE (CACHED | AS_CAC_SPACE) +#define AMO_BASE (UNCACHED | AS_AMO_SPACE) +#define GET_BASE (CACHED | AS_GET_SPACE) + +/* + * Convert Memory addresses between various addressing modes. + */ +#define TO_PHYS(x) (TO_PHYS_MASK & (x)) +#define TO_CAC(x) (CAC_BASE | TO_PHYS(x)) +#define TO_AMO(x) (AMO_BASE | TO_PHYS(x)) +#define TO_GET(x) (GET_BASE | TO_PHYS(x)) + + +/* + * Convert from processor physical address to II/TIO physical address: + * II - squeeze out the AS bits + * TIO- requires a chiplet id in bits 38-39. For DMA to memory, + * the chiplet id is zero. If we implement TIO-TIO dma, we might need + * to insert a chiplet id into this macro. However, it is our belief + * right now that this chiplet id will be ICE, which is also zero. + */ +#define PHYS_TO_TIODMA(x) ( (((u64)(x) & NASID_MASK) << 2) | NODE_OFFSET(x)) +#define PHYS_TO_DMA(x) ( (((u64)(x) & NASID_MASK) >> 2) | NODE_OFFSET(x)) + + +/* + * The following definitions pertain to the IO special address + * space. They define the location of the big and little windows + * of any given node. + */ +#define BWIN_SIZE_BITS 29 /* big window size: 512M */ +#define TIO_BWIN_SIZE_BITS 30 /* big window size: 1G */ +#define NODE_SWIN_BASE(n, w) ((w == 0) ?
NODE_BWIN_BASE((n), SWIN0_BIGWIN) \ + : RAW_NODE_SWIN_BASE(n, w)) +#define NODE_IO_BASE(n) (GLOBAL_MMR_SPACE | NASID_SPACE(n)) +#define BWIN_SIZE (1UL << BWIN_SIZE_BITS) +#define NODE_BWIN_BASE0(n) (NODE_IO_BASE(n) + BWIN_SIZE) +#define NODE_BWIN_BASE(n, w) (NODE_BWIN_BASE0(n) + ((u64) (w) << BWIN_SIZE_BITS)) +#define RAW_NODE_SWIN_BASE(n, w) (NODE_IO_BASE(n) + ((u64) (w) << SWIN_SIZE_BITS)) +#define BWIN_WIDGET_MASK 0x7 +#define BWIN_WINDOWNUM(x) (((x) >> BWIN_SIZE_BITS) & BWIN_WIDGET_MASK) + +#define TIO_BWIN_WINDOW_SELECT_MASK 0x7 +#define TIO_BWIN_WINDOWNUM(x) (((x) >> TIO_BWIN_SIZE_BITS) & TIO_BWIN_WINDOW_SELECT_MASK) + + + +/* + * The following definitions pertain to the IO special address + * space. They define the location of the big and little windows + * of any given node. + */ + +#define SWIN_SIZE_BITS 24 +#define SWIN_WIDGET_MASK 0xF + +#define TIO_SWIN_SIZE_BITS 28 +#define TIO_SWIN_SIZE (1UL << TIO_SWIN_SIZE_BITS) +#define TIO_SWIN_WIDGET_MASK 0x3 + +/* + * Convert smallwindow address to xtalk address. + * + * 'addr' can be physical or virtual address, but will be converted + * to Xtalk address in the range 0 -> SWINZ_SIZEMASK + */ +#define SWIN_WIDGETNUM(x) (((x) >> SWIN_SIZE_BITS) & SWIN_WIDGET_MASK) +#define TIO_SWIN_WIDGETNUM(x) (((x) >> TIO_SWIN_SIZE_BITS) & TIO_SWIN_WIDGET_MASK) + + +/* + * The following macros produce the correct base virtual address for + * the hub registers. The REMOTE_HUB_* macros produce + * the address for the specified hub's registers. The intent is + * that the appropriate PI, MD, NI, or II register would be substituted + * for x. + * + * WARNING: + * When certain Hub chip workarounds are defined, it's not sufficient + * to dereference the *_HUB_ADDR() macros. You should instead use + * HUB_L() and HUB_S() if you must deal with pointers to hub registers. + * Otherwise, the recommended approach is to use *_HUB_L() and *_HUB_S(). + * They're always safe. + */ +#define REMOTE_HUB_ADDR(n,x) \ + ((n & 1) ? \ + /* TIO: */ \ + ((volatile u64 *)(GLOBAL_MMR_ADDR(n,x))) \ + : /* SHUB: */ \ + (((x) & BWIN_TOP) ? ((volatile u64 *)(GLOBAL_MMR_ADDR(n,x)))\ + : ((volatile u64 *)(NODE_SWIN_BASE(n,1) + 0x800000 + (x))))) + + + +#define HUB_L(x) (*((volatile typeof(*x) *)x)) +#define HUB_S(x,d) (*((volatile typeof(*x) *)x) = (d)) + +#define REMOTE_HUB_L(n, a) HUB_L(REMOTE_HUB_ADDR((n), (a))) +#define REMOTE_HUB_S(n, a, d) HUB_S(REMOTE_HUB_ADDR((n), (a)), (d)) + + +#endif /* _ASM_IA64_SN_ADDRS_H */ diff --git a/include/asm-ia64/sn/arch.h b/include/asm-ia64/sn/arch.h new file mode 100644 index 000000000000..7c349f07916a --- /dev/null +++ b/include/asm-ia64/sn/arch.h @@ -0,0 +1,52 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * SGI specific setup. + * + * Copyright (C) 1995-1997,1999,2001-2004 Silicon Graphics, Inc. All rights reserved. + * Copyright (C) 1999 Ralf Baechle (ralf@gnu.org) + */ +#ifndef _ASM_IA64_SN_ARCH_H +#define _ASM_IA64_SN_ARCH_H + +#include <asm/types.h> +#include <asm/percpu.h> +#include <asm/sn/types.h> +#include <asm/sn/sn_cpuid.h> + +/* + * The following defines attributes of the HUB chip. These attributes are + * frequently referenced. They are kept in the per-cpu data areas of each cpu. + * They are kept together in a struct to minimize cache misses. 
+ */ +struct sn_hub_info_s { + u8 shub2; + u8 nasid_shift; + u8 as_shift; + u8 shub_1_1_found; + u16 nasid_bitmask; +}; +DECLARE_PER_CPU(struct sn_hub_info_s, __sn_hub_info); +#define sn_hub_info (&__get_cpu_var(__sn_hub_info)) +#define is_shub2() (sn_hub_info->shub2) +#define is_shub1() (sn_hub_info->shub2 == 0) + +/* + * Use this macro to test if shub 1.1 wars should be enabled + */ +#define enable_shub_wars_1_1() (sn_hub_info->shub_1_1_found) + + +/* + * This is the maximum number of nodes that can be part of a kernel. + * Effectively, it's the maximum number of compact node ids (cnodeid_t). + * This is not necessarily the same as MAX_NASIDS. + */ +#define MAX_COMPACT_NODES 2048 +#define CPUS_PER_NODE 4 + +extern void sn_flush_all_caches(long addr, long bytes); + +#endif /* _ASM_IA64_SN_ARCH_H */ diff --git a/include/asm-ia64/sn/bte.h b/include/asm-ia64/sn/bte.h new file mode 100644 index 000000000000..0ec27f99c181 --- /dev/null +++ b/include/asm-ia64/sn/bte.h @@ -0,0 +1,148 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved. + */ + + +#ifndef _ASM_IA64_SN_BTE_H +#define _ASM_IA64_SN_BTE_H + +#include <linux/timer.h> +#include <linux/spinlock.h> +#include <linux/cache.h> +#include <asm/sn/types.h> + + +/* #define BTE_DEBUG */ +/* #define BTE_DEBUG_VERBOSE */ + +#ifdef BTE_DEBUG +# define BTE_PRINTK(x) printk x /* Terse */ +# ifdef BTE_DEBUG_VERBOSE +# define BTE_PRINTKV(x) printk x /* Verbose */ +# else +# define BTE_PRINTKV(x) +# endif /* BTE_DEBUG_VERBOSE */ +#else +# define BTE_PRINTK(x) +# define BTE_PRINTKV(x) +#endif /* BTE_DEBUG */ + + +/* BTE status register only supports 16 bits for length field */ +#define BTE_LEN_BITS (16) +#define BTE_LEN_MASK ((1 << BTE_LEN_BITS) - 1) +#define BTE_MAX_XFER ((1 << BTE_LEN_BITS) * L1_CACHE_BYTES) + + +/* Define hardware */ +#define BTES_PER_NODE 2 + + +/* Define hardware modes */ +#define BTE_NOTIFY (IBCT_NOTIFY) +#define BTE_NORMAL BTE_NOTIFY +#define BTE_ZERO_FILL (BTE_NOTIFY | IBCT_ZFIL_MODE) +/* Use a reserved bit to let the caller specify a wait for any BTE */ +#define BTE_WACQUIRE (0x4000) +/* Use the BTE on the node with the destination memory */ +#define BTE_USE_DEST (BTE_WACQUIRE << 1) +/* Use any available BTE interface on any node for the transfer */ +#define BTE_USE_ANY (BTE_USE_DEST << 1) +/* macro to force the IBCT0 value valid */ +#define BTE_VALID_MODE(x) ((x) & (IBCT_NOTIFY | IBCT_ZFIL_MODE)) + +#define BTE_ACTIVE (IBLS_BUSY | IBLS_ERROR) +#define BTE_WORD_AVAILABLE (IBLS_BUSY << 1) +#define BTE_WORD_BUSY (~BTE_WORD_AVAILABLE) + +/* + * Some macros to simplify reading. + * Start with macros to locate the BTE control registers. + */ +#define BTE_LNSTAT_LOAD(_bte) \ + HUB_L(_bte->bte_base_addr) +#define BTE_LNSTAT_STORE(_bte, _x) \ + HUB_S(_bte->bte_base_addr, (_x)) +#define BTE_SRC_STORE(_bte, _x) \ + HUB_S(_bte->bte_base_addr + (BTEOFF_SRC/8), (_x)) +#define BTE_DEST_STORE(_bte, _x) \ + HUB_S(_bte->bte_base_addr + (BTEOFF_DEST/8), (_x)) +#define BTE_CTRL_STORE(_bte, _x) \ + HUB_S(_bte->bte_base_addr + (BTEOFF_CTRL/8), (_x)) +#define BTE_NOTIF_STORE(_bte, _x) \ + HUB_S(_bte->bte_base_addr + (BTEOFF_NOTIFY/8), (_x)) + + +/* Possible results from bte_copy and bte_unaligned_copy */ +/* The following error codes map into the BTE hardware codes + * IIO_ICRB_ECODE_* (in shubio.h). 
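+ *
+ * A hedged usage sketch (hypothetical caller; bte_copy() is declared
+ * below and blocks when the notification pointer is NULL):
+ *
+ *	bte_result_t rc = bte_copy(src, dest, len, BTE_NOTIFY, NULL);
+ *	if (rc != BTE_SUCCESS)
+ *		(report or retry the transfer)
+ *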
+ * The hardware uses an error code of 0 (IIO_ICRB_ECODE_DERR), but we want
+ * zero to mean BTE_SUCCESS, so add one (BTEFAIL_OFFSET) to the error
+ * codes to give the following error codes.
+ */
+#define BTEFAIL_OFFSET 1
+
+typedef enum {
+	BTE_SUCCESS,		/* 0 is success */
+	BTEFAIL_DIR,		/* Directory error due to IIO access*/
+	BTEFAIL_POISON,		/* poison error on IO access (write to poison page) */
+	BTEFAIL_WERR,		/* Write error (ie WINV to a Read only line) */
+	BTEFAIL_ACCESS,		/* access error (protection violation) */
+	BTEFAIL_PWERR,		/* Partial Write Error */
+	BTEFAIL_PRERR,		/* Partial Read Error */
+	BTEFAIL_TOUT,		/* CRB Time out */
+	BTEFAIL_XTERR,		/* Incoming xtalk pkt had error bit */
+	BTEFAIL_NOTAVAIL,	/* BTE not available */
+} bte_result_t;
+
+
+/*
+ * Structure defining a bte. An instance of this
+ * structure is created in the nodepda for each
+ * bte on that node (as defined by BTES_PER_NODE).
+ * This structure contains everything necessary
+ * to work with a BTE.
+ */
+struct bteinfo_s {
+	volatile u64 notify ____cacheline_aligned;
+	u64 *bte_base_addr ____cacheline_aligned;
+	spinlock_t spinlock;
+	cnodeid_t bte_cnode;	/* cnode */
+	int bte_error_count;	/* Number of errors encountered */
+	int bte_num;		/* 0 --> BTE0, 1 --> BTE1 */
+	int cleanup_active;	/* Interface is locked for cleanup */
+	volatile bte_result_t bh_error;	/* error while processing */
+	volatile u64 *most_rcnt_na;
+};
+
+
+/*
+ * Function prototypes (functions defined in bte.c, used elsewhere)
+ */
+extern bte_result_t bte_copy(u64, u64, u64, u64, void *);
+extern bte_result_t bte_unaligned_copy(u64, u64, u64, u64);
+extern void bte_error_handler(unsigned long);
+
+#define bte_zero(dest, len, mode, notification) \
+	bte_copy(0, dest, len, ((mode) | BTE_ZERO_FILL), notification)
+
+/*
+ * The following is the preferred way of calling bte_unaligned_copy.
+ * If the copy is fully cache line aligned, then bte_copy is
+ * used instead. Since bte_copy is inlined, this saves a call-stack
+ * frame. NOTE: bte_copy is called synchronously and does block
+ * until the transfer is complete. In order to get the asynchronous
+ * version of bte_copy, you must perform this check yourself.
+ */
+#define BTE_UNALIGNED_COPY(src, dest, len, mode) \
+	(((len & L1_CACHE_MASK) || (src & L1_CACHE_MASK) || \
+	(dest & L1_CACHE_MASK)) ? \
+	bte_unaligned_copy(src, dest, len, mode) : \
+	bte_copy(src, dest, len, mode, NULL))
+
+
+#endif /* _ASM_IA64_SN_BTE_H */
diff --git a/include/asm-ia64/sn/clksupport.h b/include/asm-ia64/sn/clksupport.h
new file mode 100644
index 000000000000..d340c365a824
--- /dev/null
+++ b/include/asm-ia64/sn/clksupport.h
@@ -0,0 +1,28 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+
+/*
+ * This file contains definitions for accessing a platform-supported
+ * high-resolution clock. The clock is monotonically increasing and can
+ * be accessed from any node in the system. The clock is synchronized
+ * across nodes - all nodes see the same value.
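+ *
+ * For illustration (a hedged sketch, not code from this file), elapsed
+ * whole seconds can be derived as:
+ *
+ *	unsigned long secs = rtc_time() / sn_rtc_cycles_per_second;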
+ * + * RTC_COUNTER_ADDR - contains the address of the counter + * + */ + +#ifndef _ASM_IA64_SN_CLKSUPPORT_H +#define _ASM_IA64_SN_CLKSUPPORT_H + +extern unsigned long sn_rtc_cycles_per_second; + +#define RTC_COUNTER_ADDR ((long *)LOCAL_MMR_ADDR(SH_RTC)) + +#define rtc_time() (*RTC_COUNTER_ADDR) + +#endif /* _ASM_IA64_SN_CLKSUPPORT_H */ diff --git a/include/asm-ia64/sn/fetchop.h b/include/asm-ia64/sn/fetchop.h new file mode 100644 index 000000000000..5f4ad8f4b5d2 --- /dev/null +++ b/include/asm-ia64/sn/fetchop.h @@ -0,0 +1,85 @@ +/* + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2001-2004 Silicon Graphics, Inc. All rights reserved. + */ + +#ifndef _ASM_IA64_SN_FETCHOP_H +#define _ASM_IA64_SN_FETCHOP_H + +#include <linux/config.h> + +#define FETCHOP_BASENAME "sgi_fetchop" +#define FETCHOP_FULLNAME "/dev/sgi_fetchop" + + + +#define FETCHOP_VAR_SIZE 64 /* 64 byte per fetchop variable */ + +#define FETCHOP_LOAD 0 +#define FETCHOP_INCREMENT 8 +#define FETCHOP_DECREMENT 16 +#define FETCHOP_CLEAR 24 + +#define FETCHOP_STORE 0 +#define FETCHOP_AND 24 +#define FETCHOP_OR 32 + +#define FETCHOP_CLEAR_CACHE 56 + +#define FETCHOP_LOAD_OP(addr, op) ( \ + *(volatile long *)((char*) (addr) + (op))) + +#define FETCHOP_STORE_OP(addr, op, x) ( \ + *(volatile long *)((char*) (addr) + (op)) = (long) (x)) + +#ifdef __KERNEL__ + +/* + * Convert a region 6 (kaddr) address to the address of the fetchop variable + */ +#define FETCHOP_KADDR_TO_MSPEC_ADDR(kaddr) TO_MSPEC(kaddr) + + +/* + * Each Atomic Memory Operation (AMO formerly known as fetchop) + * variable is 64 bytes long. The first 8 bytes are used. The + * remaining 56 bytes are unaddressable due to the operation taking + * that portion of the address. + * + * NOTE: The AMO_t _MUST_ be placed in either the first or second half + * of the cache line. The cache line _MUST NOT_ be used for anything + * other than additional AMO_t entries. This is because there are two + * addresses which reference the same physical cache line. One will + * be a cached entry with the memory type bits all set. This address + * may be loaded into processor cache. The AMO_t will be referenced + * uncached via the memory special memory type. If any portion of the + * cached cache-line is modified, when that line is flushed, it will + * overwrite the uncached value in physical memory and lead to + * inconsistency. + */ +typedef struct { + u64 variable; + u64 unused[7]; +} AMO_t; + + +/* + * The following APIs are externalized to the kernel to allocate/free pages of + * fetchop variables. + * fetchop_kalloc_page - Allocate/initialize 1 fetchop page on the + * specified cnode. + * fetchop_kfree_page - Free a previously allocated fetchop page + */ + +unsigned long fetchop_kalloc_page(int nid); +void fetchop_kfree_page(unsigned long maddr); + + +#endif /* __KERNEL__ */ + +#endif /* _ASM_IA64_SN_FETCHOP_H */ + diff --git a/include/asm-ia64/sn/geo.h b/include/asm-ia64/sn/geo.h new file mode 100644 index 000000000000..f566343d25f8 --- /dev/null +++ b/include/asm-ia64/sn/geo.h @@ -0,0 +1,124 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved. 
+ */
+
+#ifndef _ASM_IA64_SN_GEO_H
+#define _ASM_IA64_SN_GEO_H
+
+/* The geoid_t implementation below is based loosely on the pcfg_t
+   implementation in sys/SN/promcfg.h. */
+
+/* Type declarations */
+
+/* Size of a geoid_t structure (must be before decl. of geoid_u) */
+#define GEOID_SIZE 8 /* Would 16 be better? The size can
+			be different on different platforms. */
+
+#define MAX_SLABS 0xe /* slabs per module */
+
+typedef unsigned char geo_type_t;
+
+/* Fields common to all substructures */
+typedef struct geo_any_s {
+	moduleid_t module;	/* The module (box) this h/w lives in */
+	geo_type_t type;	/* What type of h/w is named by this geoid_t */
+	slabid_t slab;		/* The logical assembly within the module */
+} geo_any_t;
+
+/* Additional fields for particular types of hardware */
+typedef struct geo_node_s {
+	geo_any_t any;		/* No additional fields needed */
+} geo_node_t;
+
+typedef struct geo_rtr_s {
+	geo_any_t any;		/* No additional fields needed */
+} geo_rtr_t;
+
+typedef struct geo_iocntl_s {
+	geo_any_t any;		/* No additional fields needed */
+} geo_iocntl_t;
+
+typedef struct geo_pcicard_s {
+	geo_iocntl_t any;
+	char bus;		/* Bus/widget number */
+	char slot;		/* PCI slot number */
+} geo_pcicard_t;
+
+/* Subcomponents of a node */
+typedef struct geo_cpu_s {
+	geo_node_t node;
+	char slice;		/* Which CPU on the node */
+} geo_cpu_t;
+
+typedef struct geo_mem_s {
+	geo_node_t node;
+	char membus;		/* The memory bus on the node */
+	char memslot;		/* The memory slot on the bus */
+} geo_mem_t;
+
+
+typedef union geoid_u {
+	geo_any_t any;
+	geo_node_t node;
+	geo_iocntl_t iocntl;
+	geo_pcicard_t pcicard;
+	geo_rtr_t rtr;
+	geo_cpu_t cpu;
+	geo_mem_t mem;
+	char padsize[GEOID_SIZE];
+} geoid_t;
+
+
+/* Preprocessor macros */
+
+#define GEO_MAX_LEN 48 /* max. formatted length, plus some pad:
+			  module/001c07/slab/5/node/memory/2/slot/4 */
+
+/* Values for geo_type_t */
+#define GEO_TYPE_INVALID 0
+#define GEO_TYPE_MODULE 1
+#define GEO_TYPE_NODE 2
+#define GEO_TYPE_RTR 3
+#define GEO_TYPE_IOCNTL 4
+#define GEO_TYPE_IOCARD 5
+#define GEO_TYPE_CPU 6
+#define GEO_TYPE_MEM 7
+#define GEO_TYPE_MAX (GEO_TYPE_MEM+1)
+
+/* Parameter for hwcfg_format_geoid_compt() */
+#define GEO_COMPT_MODULE 1
+#define GEO_COMPT_SLAB 2
+#define GEO_COMPT_IOBUS 3
+#define GEO_COMPT_IOSLOT 4
+#define GEO_COMPT_CPU 5
+#define GEO_COMPT_MEMBUS 6
+#define GEO_COMPT_MEMSLOT 7
+
+#define GEO_INVALID_STR "<invalid>"
+
+#define INVALID_NASID ((nasid_t)-1)
+#define INVALID_CNODEID ((cnodeid_t)-1)
+#define INVALID_PNODEID ((pnodeid_t)-1)
+#define INVALID_SLAB (slabid_t)-1
+#define INVALID_MODULE ((moduleid_t)-1)
+#define INVALID_PARTID ((partid_t)-1)
+
+static inline slabid_t geo_slab(geoid_t g)
+{
+	return (g.any.type == GEO_TYPE_INVALID) ?
+		INVALID_SLAB : g.any.slab;
+}
+
+static inline moduleid_t geo_module(geoid_t g)
+{
+	return (g.any.type == GEO_TYPE_INVALID) ?
+		INVALID_MODULE : g.any.module;
+}
+
+extern geoid_t cnodeid_get_geoid(cnodeid_t cnode);
+
+#endif /* _ASM_IA64_SN_GEO_H */
diff --git a/include/asm-ia64/sn/intr.h b/include/asm-ia64/sn/intr.h
new file mode 100644
index 000000000000..e51471fb0867
--- /dev/null
+++ b/include/asm-ia64/sn/intr.h
@@ -0,0 +1,56 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_IA64_SN_INTR_H
+#define _ASM_IA64_SN_INTR_H
+
+#define SGI_UART_VECTOR (0xe9)
+#define SGI_PCIBR_ERROR (0x33)
+
+/* Reserved IRQs : Note, not to exceed IA64_SN2_FIRST_DEVICE_VECTOR */
+#define SGI_XPC_ACTIVATE (0x30)
+#define SGI_II_ERROR (0x31)
+#define SGI_XBOW_ERROR (0x32)
+#define SGI_PCIBR_ERROR (0x33)
+#define SGI_ACPI_SCI_INT (0x34)
+#define SGI_TIOCA_ERROR (0x35)
+#define SGI_TIO_ERROR (0x36)
+#define SGI_TIOCX_ERROR (0x37)
+#define SGI_MMTIMER_VECTOR (0x38)
+#define SGI_XPC_NOTIFY (0xe7)
+
+#define IA64_SN2_FIRST_DEVICE_VECTOR (0x3c)
+#define IA64_SN2_LAST_DEVICE_VECTOR (0xe6)
+
+#define SN2_IRQ_RESERVED (0x1)
+#define SN2_IRQ_CONNECTED (0x2)
+#define SN2_IRQ_SHARED (0x4)
+
+/* The SN PROM irq struct */
+struct sn_irq_info {
+	struct sn_irq_info *irq_next;	/* sharing irq list */
+	short	irq_nasid;	/* Nasid IRQ is assigned to */
+	int	irq_slice;	/* slice IRQ is assigned to */
+	int	irq_cpuid;	/* kernel logical cpuid */
+	int	irq_irq;	/* the IRQ number */
+	int	irq_int_bit;	/* Bridge interrupt pin */
+	uint64_t irq_xtalkaddr;	/* xtalkaddr IRQ is sent to */
+	int	irq_bridge_type;/* pciio asic type (pciio.h) */
+	void	*irq_bridge;	/* bridge generating irq */
+	void	*irq_pciioinfo;	/* associated pciio_info_t */
+	int	irq_last_intr;	/* For Shub lb lost intr WAR */
+	int	irq_cookie;	/* unique cookie */
+	int	irq_flags;	/* flags */
+	int	irq_share_cnt;	/* num devices sharing IRQ */
+};
+
+extern void sn_send_IPI_phys(int, long, int, int);
+
+#define CPU_VECTOR_TO_IRQ(cpuid,vector) (vector)
+
+#endif /* _ASM_IA64_SN_INTR_H */
diff --git a/include/asm-ia64/sn/io.h b/include/asm-ia64/sn/io.h
new file mode 100644
index 000000000000..42209733f6b1
--- /dev/null
+++ b/include/asm-ia64/sn/io.h
@@ -0,0 +1,265 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_SN_IO_H
+#define _ASM_SN_IO_H
+#include <linux/compiler.h>
+#include <asm/intrinsics.h>
+
+extern void * sn_io_addr(unsigned long port) __attribute_const__; /* Forward definition */
+extern void __sn_mmiowb(void); /* Forward definition */
+
+extern int numionodes;
+
+#define __sn_mf_a() ia64_mfa()
+
+extern void sn_dma_flush(unsigned long);
+
+#define __sn_inb ___sn_inb
+#define __sn_inw ___sn_inw
+#define __sn_inl ___sn_inl
+#define __sn_outb ___sn_outb
+#define __sn_outw ___sn_outw
+#define __sn_outl ___sn_outl
+#define __sn_readb ___sn_readb
+#define __sn_readw ___sn_readw
+#define __sn_readl ___sn_readl
+#define __sn_readq ___sn_readq
+#define __sn_readb_relaxed ___sn_readb_relaxed
+#define __sn_readw_relaxed ___sn_readw_relaxed
+#define __sn_readl_relaxed ___sn_readl_relaxed
+#define __sn_readq_relaxed ___sn_readq_relaxed
+
+/*
+ * The following routines are SN Platform specific, called when
+ * a reference is made to the inX/outX macros. The SN Platform
+ * inX macros ensure that posted DMA writes on the
+ * Bridge are flushed.
+ *
+ * The routines should be self-explanatory.
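+ *
+ * As a sketch (hypothetical driver code, not part of this header), a
+ * port read through this path returns the value and also flushes any
+ * posted DMA writes via sn_dma_flush():
+ *
+ *	unsigned int status = ___sn_inb(io_port);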
+ */
+
+static inline unsigned int
+___sn_inb (unsigned long port)
+{
+	volatile unsigned char *addr;
+	unsigned char ret = -1;
+
+	if ((addr = sn_io_addr(port))) {
+		ret = *addr;
+		__sn_mf_a();
+		sn_dma_flush((unsigned long)addr);
+	}
+	return ret;
+}
+
+static inline unsigned int
+___sn_inw (unsigned long port)
+{
+	volatile unsigned short *addr;
+	unsigned short ret = -1;
+
+	if ((addr = sn_io_addr(port))) {
+		ret = *addr;
+		__sn_mf_a();
+		sn_dma_flush((unsigned long)addr);
+	}
+	return ret;
+}
+
+static inline unsigned int
+___sn_inl (unsigned long port)
+{
+	volatile unsigned int *addr;
+	unsigned int ret = -1;
+
+	if ((addr = sn_io_addr(port))) {
+		ret = *addr;
+		__sn_mf_a();
+		sn_dma_flush((unsigned long)addr);
+	}
+	return ret;
+}
+
+static inline void
+___sn_outb (unsigned char val, unsigned long port)
+{
+	volatile unsigned char *addr;
+
+	if ((addr = sn_io_addr(port))) {
+		*addr = val;
+		__sn_mmiowb();
+	}
+}
+
+static inline void
+___sn_outw (unsigned short val, unsigned long port)
+{
+	volatile unsigned short *addr;
+
+	if ((addr = sn_io_addr(port))) {
+		*addr = val;
+		__sn_mmiowb();
+	}
+}
+
+static inline void
+___sn_outl (unsigned int val, unsigned long port)
+{
+	volatile unsigned int *addr;
+
+	if ((addr = sn_io_addr(port))) {
+		*addr = val;
+		__sn_mmiowb();
+	}
+}
+
+/*
+ * The following routines are SN Platform specific, called when
+ * a reference is made to the readX/writeX macros. The SN Platform
+ * readX macros ensure that posted DMA writes on the
+ * Bridge are flushed.
+ *
+ * The routines should be self-explanatory.
+ */
+
+static inline unsigned char
+___sn_readb (const volatile void __iomem *addr)
+{
+	unsigned char val;
+
+	val = *(volatile unsigned char __force *)addr;
+	__sn_mf_a();
+	sn_dma_flush((unsigned long)addr);
+	return val;
+}
+
+static inline unsigned short
+___sn_readw (const volatile void __iomem *addr)
+{
+	unsigned short val;
+
+	val = *(volatile unsigned short __force *)addr;
+	__sn_mf_a();
+	sn_dma_flush((unsigned long)addr);
+	return val;
+}
+
+static inline unsigned int
+___sn_readl (const volatile void __iomem *addr)
+{
+	unsigned int val;
+
+	val = *(volatile unsigned int __force *)addr;
+	__sn_mf_a();
+	sn_dma_flush((unsigned long)addr);
+	return val;
+}
+
+static inline unsigned long
+___sn_readq (const volatile void __iomem *addr)
+{
+	unsigned long val;
+
+	val = *(volatile unsigned long __force *)addr;
+	__sn_mf_a();
+	sn_dma_flush((unsigned long)addr);
+	return val;
+}
+
+/*
+ * For generic and SN2 kernels, we have a set of fast access
+ * PIO macros. These macros are provided on SN Platform
+ * because the normal inX and readX macros perform an
+ * additional task of flushing posted DMA requests on the Bridge.
+ *
+ * These routines should be self-explanatory.
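+ *
+ * For illustration (hypothetical caller that already holds a mapped
+ * address), the fast variants skip the posted-DMA flush entirely:
+ *
+ *	unsigned int v = sn_inb_fast(mapped_port);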
+ */ + +static inline unsigned int +sn_inb_fast (unsigned long port) +{ + volatile unsigned char *addr = (unsigned char *)port; + unsigned char ret; + + ret = *addr; + __sn_mf_a(); + return ret; +} + +static inline unsigned int +sn_inw_fast (unsigned long port) +{ + volatile unsigned short *addr = (unsigned short *)port; + unsigned short ret; + + ret = *addr; + __sn_mf_a(); + return ret; +} + +static inline unsigned int +sn_inl_fast (unsigned long port) +{ + volatile unsigned int *addr = (unsigned int *)port; + unsigned int ret; + + ret = *addr; + __sn_mf_a(); + return ret; +} + +static inline unsigned char +___sn_readb_relaxed (const volatile void __iomem *addr) +{ + return *(volatile unsigned char __force *)addr; +} + +static inline unsigned short +___sn_readw_relaxed (const volatile void __iomem *addr) +{ + return *(volatile unsigned short __force *)addr; +} + +static inline unsigned int +___sn_readl_relaxed (const volatile void __iomem *addr) +{ + return *(volatile unsigned int __force *) addr; +} + +static inline unsigned long +___sn_readq_relaxed (const volatile void __iomem *addr) +{ + return *(volatile unsigned long __force *) addr; +} + +struct pci_dev; + +static inline int +sn_pci_set_vchan(struct pci_dev *pci_dev, unsigned long *addr, int vchan) +{ + + if (vchan > 1) { + return -1; + } + + if (!(*addr >> 32)) /* Using a mask here would be cleaner */ + return 0; /* but this generates better code */ + + if (vchan == 1) { + /* Set Bit 57 */ + *addr |= (1UL << 57); + } else { + /* Clear Bit 57 */ + *addr &= ~(1UL << 57); + } + + return 0; +} + +#endif /* _ASM_SN_IO_H */ diff --git a/include/asm-ia64/sn/klconfig.h b/include/asm-ia64/sn/klconfig.h new file mode 100644 index 000000000000..9f920c70a62a --- /dev/null +++ b/include/asm-ia64/sn/klconfig.h @@ -0,0 +1,272 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Derived from IRIX <sys/SN/klconfig.h>. + * + * Copyright (C) 1992-1997,1999,2001-2004 Silicon Graphics, Inc. All Rights Reserved. + * Copyright (C) 1999 by Ralf Baechle + */ +#ifndef _ASM_IA64_SN_KLCONFIG_H +#define _ASM_IA64_SN_KLCONFIG_H + +/* + * The KLCONFIG structures store info about the various BOARDs found + * during Hardware Discovery. In addition, it stores info about the + * components found on the BOARDs. + */ + +typedef s32 klconf_off_t; + + +/* Functions/macros needed to use this structure */ + +typedef struct kl_config_hdr { + char pad[20]; + klconf_off_t ch_board_info; /* the link list of boards */ + char pad0[88]; +} kl_config_hdr_t; + + +#define NODE_OFFSET_TO_LBOARD(nasid,off) (lboard_t*)(GLOBAL_CAC_ADDR((nasid), (off))) + +/* + * The KLCONFIG area is organized as a LINKED LIST of BOARDs. A BOARD + * can be either 'LOCAL' or 'REMOTE'. LOCAL means it is attached to + * the LOCAL/current NODE. REMOTE means it is attached to a different + * node.(TBD - Need a way to treat ROUTER boards.) + * + * There are 2 different structures to represent these boards - + * lboard - Local board, rboard - remote board. These 2 structures + * can be arbitrarily mixed in the LINKED LIST of BOARDs. (Refer + * Figure below). The first byte of the rboard or lboard structure + * is used to find out its type - no unions are used. + * If it is a lboard, then the config info of this board will be found + * on the local node. (LOCAL NODE BASE + offset value gives pointer to + * the structure. 
+ * If it is a rboard, the local structure contains the node number + * and the offset of the beginning of the LINKED LIST on the remote node. + * The details of the hardware on a remote node can be built locally, + * if required, by reading the LINKED LIST on the remote node and + * ignoring all the rboards on that node. + * + * The local node uses the REMOTE NODE NUMBER + OFFSET to point to the + * First board info on the remote node. The remote node list is + * traversed as the local list, using the REMOTE BASE ADDRESS and not + * the local base address and ignoring all rboard values. + * + * + KLCONFIG + + +------------+ +------------+ +------------+ +------------+ + | lboard | +-->| lboard | +-->| rboard | +-->| lboard | + +------------+ | +------------+ | +------------+ | +------------+ + | board info | | | board info | | |errinfo,bptr| | | board info | + +------------+ | +------------+ | +------------+ | +------------+ + | offset |--+ | offset |--+ | offset |--+ |offset=NULL | + +------------+ +------------+ +------------+ +------------+ + + + +------------+ + | board info | + +------------+ +--------------------------------+ + | compt 1 |------>| type, rev, diaginfo, size ... | (CPU) + +------------+ +--------------------------------+ + | compt 2 |--+ + +------------+ | +--------------------------------+ + | ... | +--->| type, rev, diaginfo, size ... | (MEM_BANK) + +------------+ +--------------------------------+ + | errinfo |--+ + +------------+ | +--------------------------------+ + +--->|r/l brd errinfo,compt err flags | + +--------------------------------+ + + * + * Each BOARD consists of COMPONENTs and the BOARD structure has + * pointers (offsets) to its COMPONENT structure. + * The COMPONENT structure has version info, size and speed info, revision, + * error info and the NIC info. This structure can accommodate any + * BOARD with arbitrary COMPONENT composition. + * + * The ERRORINFO part of each BOARD has error information + * that describes errors about the BOARD itself. It also has flags to + * indicate the COMPONENT(s) on the board that have errors. The error + * information specific to the COMPONENT is present in the respective + * COMPONENT structure. + * + * The ERRORINFO structure is also treated like a COMPONENT, ie. the + * BOARD has pointers(offset) to the ERRORINFO structure. The rboard + * structure also has a pointer to the ERRORINFO structure. This is + * the place to store ERRORINFO about a REMOTE NODE, if the HUB on + * that NODE is not working or if the REMOTE MEMORY is BAD. In cases where + * only the CPU of the REMOTE NODE is disabled, the ERRORINFO pointer can + * be a NODE NUMBER, REMOTE OFFSET combination, pointing to error info + * which is present on the REMOTE NODE.(TBD) + * REMOTE ERRINFO can be stored on any of the nearest nodes + * or on all the nearest nodes.(TBD) + * Like BOARD structures, REMOTE ERRINFO structures can be built locally + * using the rboard errinfo pointer. + * + * In order to get useful information from this Data organization, a set of + * interface routines are provided (TBD). The important thing to remember while + * manipulating the structures, is that, the NODE number information should + * be used. If the NODE is non-zero (remote) then each offset should + * be added to the REMOTE BASE ADDR else it should be added to the LOCAL BASE ADDR. + * This includes offsets for BOARDS, COMPONENTS and ERRORINFO. + * + * Note that these structures do not provide much info about connectivity. 
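+ *
+ * (As an illustrative aside -- a hypothetical walker using only the
+ * macros and externs declared below -- counting the IO boards on a
+ * node could look like:
+ *
+ *	for (brd = root_lboard[cnode]; brd; brd = KLCF_NEXT_ANY(brd))
+ *		if (KLCLASS(brd->brd_type) == KLCLASS_IO)
+ *			n++;
+ *
+ * find_lboard_any() below implements the same walk for an exact
+ * board type.)
+ *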
+ * That info will be part of HWGRAPH, which is an extension of the cfg_t + * data structure. (ref IP27prom/cfg.h) It has to be extended to include + * the IO part of the Network(TBD). + * + * The data structures below define the above concepts. + */ + + +/* + * BOARD classes + */ + +#define KLCLASS_MASK 0xf0 +#define KLCLASS_NONE 0x00 +#define KLCLASS_NODE 0x10 /* CPU, Memory and HUB board */ +#define KLCLASS_CPU KLCLASS_NODE +#define KLCLASS_IO 0x20 /* BaseIO, 4 ch SCSI, ethernet, FDDI + and the non-graphics widget boards */ +#define KLCLASS_ROUTER 0x30 /* Router board */ +#define KLCLASS_MIDPLANE 0x40 /* We need to treat this as a board + so that we can record error info */ +#define KLCLASS_IOBRICK 0x70 /* IP35 iobrick */ +#define KLCLASS_MAX 8 /* Bump this if a new CLASS is added */ + +#define KLCLASS(_x) ((_x) & KLCLASS_MASK) + + +/* + * board types + */ + +#define KLTYPE_MASK 0x0f +#define KLTYPE(_x) ((_x) & KLTYPE_MASK) + +#define KLTYPE_SNIA (KLCLASS_CPU | 0x1) +#define KLTYPE_TIO (KLCLASS_CPU | 0x2) + +#define KLTYPE_ROUTER (KLCLASS_ROUTER | 0x1) +#define KLTYPE_META_ROUTER (KLCLASS_ROUTER | 0x3) +#define KLTYPE_REPEATER_ROUTER (KLCLASS_ROUTER | 0x4) + +#define KLTYPE_IOBRICK_XBOW (KLCLASS_MIDPLANE | 0x2) + +#define KLTYPE_IOBRICK (KLCLASS_IOBRICK | 0x0) +#define KLTYPE_NBRICK (KLCLASS_IOBRICK | 0x4) +#define KLTYPE_PXBRICK (KLCLASS_IOBRICK | 0x6) +#define KLTYPE_IXBRICK (KLCLASS_IOBRICK | 0x7) +#define KLTYPE_CGBRICK (KLCLASS_IOBRICK | 0x8) +#define KLTYPE_OPUSBRICK (KLCLASS_IOBRICK | 0x9) +#define KLTYPE_SABRICK (KLCLASS_IOBRICK | 0xa) +#define KLTYPE_IABRICK (KLCLASS_IOBRICK | 0xb) +#define KLTYPE_PABRICK (KLCLASS_IOBRICK | 0xc) +#define KLTYPE_GABRICK (KLCLASS_IOBRICK | 0xd) + + +/* + * board structures + */ + +#define MAX_COMPTS_PER_BRD 24 + +typedef struct lboard_s { + klconf_off_t brd_next_any; /* Next BOARD */ + unsigned char struct_type; /* type of structure, local or remote */ + unsigned char brd_type; /* type+class */ + unsigned char brd_sversion; /* version of this structure */ + unsigned char brd_brevision; /* board revision */ + unsigned char brd_promver; /* board prom version, if any */ + unsigned char brd_flags; /* Enabled, Disabled etc */ + unsigned char brd_slot; /* slot number */ + unsigned short brd_debugsw; /* Debug switches */ + geoid_t brd_geoid; /* geo id */ + partid_t brd_partition; /* Partition number */ + unsigned short brd_diagval; /* diagnostic value */ + unsigned short brd_diagparm; /* diagnostic parameter */ + unsigned char brd_inventory; /* inventory history */ + unsigned char brd_numcompts; /* Number of components */ + nic_t brd_nic; /* Number in CAN */ + nasid_t brd_nasid; /* passed parameter */ + klconf_off_t brd_compts[MAX_COMPTS_PER_BRD]; /* pointers to COMPONENTS */ + klconf_off_t brd_errinfo; /* Board's error information */ + struct lboard_s *brd_parent; /* Logical parent for this brd */ + char pad0[4]; + unsigned char brd_confidence; /* confidence that the board is bad */ + nasid_t brd_owner; /* who owns this board */ + unsigned char brd_nic_flags; /* To handle 8 more NICs */ + char pad1[24]; /* future expansion */ + char brd_name[32]; + nasid_t brd_next_same_host; /* host of next brd w/same nasid */ + klconf_off_t brd_next_same; /* Next BOARD with same nasid */ +} lboard_t; + +#define KLCF_NUM_COMPS(_brd) ((_brd)->brd_numcompts) +#define NODE_OFFSET_TO_KLINFO(n,off) ((klinfo_t*) TO_NODE_CAC(n,off)) +#define KLCF_NEXT(_brd) \ + ((_brd)->brd_next_same ? 
\
+	(NODE_OFFSET_TO_LBOARD((_brd)->brd_next_same_host, (_brd)->brd_next_same)): NULL)
+#define KLCF_NEXT_ANY(_brd) \
+	((_brd)->brd_next_any ? \
+	(NODE_OFFSET_TO_LBOARD(NASID_GET(_brd), (_brd)->brd_next_any)): NULL)
+#define KLCF_COMP(_brd, _ndx) \
+	((((_brd)->brd_compts[(_ndx)]) == 0) ? 0 : \
+	(NODE_OFFSET_TO_KLINFO(NASID_GET(_brd), (_brd)->brd_compts[(_ndx)])))
+
+
+/*
+ * Generic info structure. This stores common info about a
+ * component.
+ */
+
+typedef struct klinfo_s {		/* Generic info */
+	unsigned char struct_type;	/* type of this structure */
+	unsigned char struct_version;	/* version of this structure */
+	unsigned char flags;		/* Enabled, disabled etc */
+	unsigned char revision;		/* component revision */
+	unsigned short diagval;		/* result of diagnostics */
+	unsigned short diagparm;	/* diagnostic parameter */
+	unsigned char inventory;	/* previous inventory status */
+	unsigned short partid;		/* widget part number */
+	nic_t nic;			/* Must be aligned properly */
+	unsigned char physid;		/* physical id of component */
+	unsigned int virtid;		/* virtual id as seen by system */
+	unsigned char widid;		/* Widget id - if applicable */
+	nasid_t nasid;			/* node number - from parent */
+	char pad1;			/* pad out structure. */
+	char pad2;			/* pad out structure. */
+	void *data;
+	klconf_off_t errinfo;		/* component specific errors */
+	unsigned short pad3;		/* pci fields have moved over to */
+	unsigned short pad4;		/* klbri_t */
+} klinfo_t ;
+
+
+static inline lboard_t *find_lboard_any(lboard_t * start, unsigned char brd_type)
+{
+	/* Search all boards stored on this node. */
+
+	while (start) {
+		if (start->brd_type == brd_type)
+			return start;
+		start = KLCF_NEXT_ANY(start);
+	}
+	/* Didn't find it. */
+	return (lboard_t *) NULL;
+}
+
+
+/* external declarations of Linux kernel functions. */
+
+extern lboard_t *root_lboard[];
+extern klinfo_t *find_component(lboard_t *brd, klinfo_t *kli, unsigned char type);
+extern klinfo_t *find_first_component(lboard_t *brd, unsigned char type);
+
+#endif /* _ASM_IA64_SN_KLCONFIG_H */
diff --git a/include/asm-ia64/sn/l1.h b/include/asm-ia64/sn/l1.h
new file mode 100644
index 000000000000..d5dbd55e44b5
--- /dev/null
+++ b/include/asm-ia64/sn/l1.h
@@ -0,0 +1,36 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992-1997,2000-2004 Silicon Graphics, Inc. All Rights Reserved.
+ */ + +#ifndef _ASM_IA64_SN_L1_H +#define _ASM_IA64_SN_L1_H + +/* brick type response codes */ +#define L1_BRICKTYPE_PX 0x23 /* # */ +#define L1_BRICKTYPE_PE 0x25 /* % */ +#define L1_BRICKTYPE_N_p0 0x26 /* & */ +#define L1_BRICKTYPE_IP45 0x34 /* 4 */ +#define L1_BRICKTYPE_IP41 0x35 /* 5 */ +#define L1_BRICKTYPE_TWISTER 0x36 /* 6 */ /* IP53 & ROUTER */ +#define L1_BRICKTYPE_IX 0x3d /* = */ +#define L1_BRICKTYPE_IP34 0x61 /* a */ +#define L1_BRICKTYPE_GA 0x62 /* b */ +#define L1_BRICKTYPE_C 0x63 /* c */ +#define L1_BRICKTYPE_OPUS_TIO 0x66 /* f */ +#define L1_BRICKTYPE_I 0x69 /* i */ +#define L1_BRICKTYPE_N 0x6e /* n */ +#define L1_BRICKTYPE_OPUS 0x6f /* o */ +#define L1_BRICKTYPE_P 0x70 /* p */ +#define L1_BRICKTYPE_R 0x72 /* r */ +#define L1_BRICKTYPE_CHI_CG 0x76 /* v */ +#define L1_BRICKTYPE_X 0x78 /* x */ +#define L1_BRICKTYPE_X2 0x79 /* y */ +#define L1_BRICKTYPE_SA 0x5e /* ^ */ /* TIO bringup brick */ +#define L1_BRICKTYPE_PA 0x6a /* j */ +#define L1_BRICKTYPE_IA 0x6b /* k */ + +#endif /* _ASM_IA64_SN_L1_H */ diff --git a/include/asm-ia64/sn/leds.h b/include/asm-ia64/sn/leds.h new file mode 100644 index 000000000000..66cf8c4d92c9 --- /dev/null +++ b/include/asm-ia64/sn/leds.h @@ -0,0 +1,33 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved. + */ +#ifndef _ASM_IA64_SN_LEDS_H +#define _ASM_IA64_SN_LEDS_H + +#include <asm/sn/addrs.h> +#include <asm/sn/pda.h> +#include <asm/sn/shub_mmr.h> + +#define LED0 (LOCAL_MMR_ADDR(SH_REAL_JUNK_BUS_LED0)) +#define LED_CPU_SHIFT 16 + +#define LED_CPU_HEARTBEAT 0x01 +#define LED_CPU_ACTIVITY 0x02 +#define LED_ALWAYS_SET 0x00 + +/* + * Basic macros for flashing the LEDS on an SGI SN. + */ + +static __inline__ void +set_led_bits(u8 value, u8 mask) +{ + pda->led_state = (pda->led_state & ~mask) | (value & mask); + *pda->led_address = (short) pda->led_state; +} + +#endif /* _ASM_IA64_SN_LEDS_H */ + diff --git a/include/asm-ia64/sn/module.h b/include/asm-ia64/sn/module.h new file mode 100644 index 000000000000..734e980ece2f --- /dev/null +++ b/include/asm-ia64/sn/module.h @@ -0,0 +1,127 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved. + */ +#ifndef _ASM_IA64_SN_MODULE_H +#define _ASM_IA64_SN_MODULE_H + +/* parameter for format_module_id() */ +#define MODULE_FORMAT_BRIEF 1 +#define MODULE_FORMAT_LONG 2 +#define MODULE_FORMAT_LCD 3 + +/* + * Module id format + * + * 31-16 Rack ID (encoded class, group, number - 16-bit unsigned int) + * 15-8 Brick type (8-bit ascii character) + * 7-0 Bay (brick position in rack (0-63) - 8-bit unsigned int) + * + */ + +/* + * Macros for getting the brick type + */ +#define MODULE_BTYPE_MASK 0xff00 +#define MODULE_BTYPE_SHFT 8 +#define MODULE_GET_BTYPE(_m) (((_m) & MODULE_BTYPE_MASK) >> MODULE_BTYPE_SHFT) +#define MODULE_BT_TO_CHAR(_b) ((char)(_b)) +#define MODULE_GET_BTCHAR(_m) (MODULE_BT_TO_CHAR(MODULE_GET_BTYPE(_m))) + +/* + * Macros for getting the rack ID. 
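+ *
+ * For illustration (hypothetical id): module_id 0x012d6907 decodes via
+ * these and the neighboring macros as rack MODULE_GET_RACK() = 0x12d,
+ * brick type MODULE_GET_BTCHAR() = 'i' and bay MODULE_GET_BPOS() = 7.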
+ */ +#define MODULE_RACK_MASK 0xffff0000 +#define MODULE_RACK_SHFT 16 +#define MODULE_GET_RACK(_m) (((_m) & MODULE_RACK_MASK) >> MODULE_RACK_SHFT) + +/* + * Macros for getting the brick position + */ +#define MODULE_BPOS_MASK 0x00ff +#define MODULE_BPOS_SHFT 0 +#define MODULE_GET_BPOS(_m) (((_m) & MODULE_BPOS_MASK) >> MODULE_BPOS_SHFT) + +/* + * Macros for encoding and decoding rack IDs + * A rack number consists of three parts: + * class (0==CPU/mixed, 1==I/O), group, number + * + * Rack number is stored just as it is displayed on the screen: + * a 3-decimal-digit number. + */ +#define RACK_CLASS_DVDR 100 +#define RACK_GROUP_DVDR 10 +#define RACK_NUM_DVDR 1 + +#define RACK_CREATE_RACKID(_c, _g, _n) ((_c) * RACK_CLASS_DVDR + \ + (_g) * RACK_GROUP_DVDR + (_n) * RACK_NUM_DVDR) + +#define RACK_GET_CLASS(_r) ((_r) / RACK_CLASS_DVDR) +#define RACK_GET_GROUP(_r) (((_r) - RACK_GET_CLASS(_r) * \ + RACK_CLASS_DVDR) / RACK_GROUP_DVDR) +#define RACK_GET_NUM(_r) (((_r) - RACK_GET_CLASS(_r) * \ + RACK_CLASS_DVDR - RACK_GET_GROUP(_r) * \ + RACK_GROUP_DVDR) / RACK_NUM_DVDR) + +/* + * Macros for encoding and decoding rack IDs + * A rack number consists of three parts: + * class 1 bit, 0==CPU/mixed, 1==I/O + * group 2 bits for CPU/mixed, 3 bits for I/O + * number 3 bits for CPU/mixed, 2 bits for I/O (1 based) + */ +#define RACK_GROUP_BITS(_r) (RACK_GET_CLASS(_r) ? 3 : 2) +#define RACK_NUM_BITS(_r) (RACK_GET_CLASS(_r) ? 2 : 3) + +#define RACK_CLASS_MASK(_r) 0x20 +#define RACK_CLASS_SHFT(_r) 5 +#define RACK_ADD_CLASS(_r, _c) \ + ((_r) |= (_c) << RACK_CLASS_SHFT(_r) & RACK_CLASS_MASK(_r)) + +#define RACK_GROUP_SHFT(_r) RACK_NUM_BITS(_r) +#define RACK_GROUP_MASK(_r) \ + ( (((unsigned)1<<RACK_GROUP_BITS(_r)) - 1) << RACK_GROUP_SHFT(_r) ) +#define RACK_ADD_GROUP(_r, _g) \ + ((_r) |= (_g) << RACK_GROUP_SHFT(_r) & RACK_GROUP_MASK(_r)) + +#define RACK_NUM_SHFT(_r) 0 +#define RACK_NUM_MASK(_r) \ + ( (((unsigned)1<<RACK_NUM_BITS(_r)) - 1) << RACK_NUM_SHFT(_r) ) +#define RACK_ADD_NUM(_r, _n) \ + ((_r) |= ((_n) - 1) << RACK_NUM_SHFT(_r) & RACK_NUM_MASK(_r)) + + +/* + * Brick type definitions + */ +#define MAX_BRICK_TYPES 256 /* brick type is stored as uchar */ + +extern char brick_types[]; + +#define MODULE_CBRICK 0 +#define MODULE_RBRICK 1 +#define MODULE_IBRICK 2 +#define MODULE_KBRICK 3 +#define MODULE_XBRICK 4 +#define MODULE_DBRICK 5 +#define MODULE_PBRICK 6 +#define MODULE_NBRICK 7 +#define MODULE_PEBRICK 8 +#define MODULE_PXBRICK 9 +#define MODULE_IXBRICK 10 +#define MODULE_CGBRICK 11 +#define MODULE_OPUSBRICK 12 +#define MODULE_SABRICK 13 /* TIO BringUp Brick */ +#define MODULE_IABRICK 14 +#define MODULE_PABRICK 15 +#define MODULE_GABRICK 16 +#define MODULE_OPUS_TIO 17 /* OPUS TIO Riser */ + +extern char brick_types[]; +extern void format_module_id(char *, moduleid_t, int); + +#endif /* _ASM_IA64_SN_MODULE_H */ diff --git a/include/asm-ia64/sn/nodepda.h b/include/asm-ia64/sn/nodepda.h new file mode 100644 index 000000000000..2fbde33656e6 --- /dev/null +++ b/include/asm-ia64/sn/nodepda.h @@ -0,0 +1,86 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved. 
+ */
+#ifndef _ASM_IA64_SN_NODEPDA_H
+#define _ASM_IA64_SN_NODEPDA_H
+
+
+#include <asm/semaphore.h>
+#include <asm/irq.h>
+#include <asm/sn/arch.h>
+#include <asm/sn/intr.h>
+#include <asm/sn/pda.h>
+#include <asm/sn/bte.h>
+
+/*
+ * NUMA Node-Specific Data structures are defined in this file.
+ * In particular, this is the location of the node PDA.
+ * A pointer to the right node PDA is saved in each CPU PDA.
+ */
+
+/*
+ * Node-specific data structure.
+ *
+ * One of these structures is allocated on each node of a NUMA system.
+ *
+ * This structure provides a convenient way of keeping together
+ * all per-node data structures.
+ */
+struct phys_cpuid {
+	short nasid;
+	char subnode;
+	char slice;
+};
+
+struct nodepda_s {
+	void *pdinfo;		/* Platform-dependent per-node info */
+	spinlock_t bist_lock;
+
+	/*
+	 * The BTEs on this node are shared by the local cpus
+	 */
+	struct bteinfo_s bte_if[BTES_PER_NODE];	/* Virtual Interface */
+	struct timer_list bte_recovery_timer;
+	spinlock_t bte_recovery_lock;
+
+	/*
+	 * Array of pointers to the nodepdas for each node.
+	 */
+	struct nodepda_s *pernode_pdaindr[MAX_COMPACT_NODES];
+
+	/*
+	 * Array of physical cpu identifiers. Indexed by cpuid.
+	 */
+	struct phys_cpuid phys_cpuid[NR_CPUS];
+};
+
+typedef struct nodepda_s nodepda_t;
+
+/*
+ * Access Functions for node PDA.
+ * Since there is one nodepda for each node, we need a convenient mechanism
+ * to access these nodepdas without cluttering code with #ifdefs.
+ * The next set of definitions provides this.
+ * Routines are expected to use
+ *
+ *	nodepda -> to access node PDA for the node on which code is running
+ *	subnodepda -> to access subnode PDA for the subnode on which code is running
+ *
+ *	NODEPDA(cnode) -> to access node PDA for cnodeid
+ *	SUBNODEPDA(cnode,sn) -> to access subnode PDA for cnodeid/subnode
+ */
+
+#define nodepda pda->p_nodepda		/* Ptr to this node's PDA */
+#define NODEPDA(cnode) (nodepda->pernode_pdaindr[cnode])
+
+/*
+ * Check if given a compact node id the corresponding node has all the
+ * cpus disabled.
+ */
+#define is_headless_node(cnode) (nr_cpus_node(cnode) == 0)
+
+#endif /* _ASM_IA64_SN_NODEPDA_H */
diff --git a/include/asm-ia64/sn/pda.h b/include/asm-ia64/sn/pda.h
new file mode 100644
index 000000000000..e940d3647c80
--- /dev/null
+++ b/include/asm-ia64/sn/pda.h
@@ -0,0 +1,80 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+#ifndef _ASM_IA64_SN_PDA_H
+#define _ASM_IA64_SN_PDA_H
+
+#include <linux/cache.h>
+#include <asm/percpu.h>
+#include <asm/system.h>
+#include <asm/sn/bte.h>
+
+
+/*
+ * CPU-specific data structure.
+ *
+ * One of these structures is allocated for each cpu of a NUMA system.
+ *
+ * This structure provides a convenient way of keeping together
+ * all SN per-cpu data structures.
+ */
+
+typedef struct pda_s {
+
+	/* Having a pointer at the beginning of the PDA tends to increase
+	 * the chance of having this pointer in cache. (Yes, something
+	 * else gets pushed out.) Doing this reduces the number of memory
+	 * accesses needed to reach any nodepda variable to one.
+	 */
+	struct nodepda_s *p_nodepda;		/* Pointer to Per node PDA */
+	struct subnodepda_s *p_subnodepda;	/* Pointer to CPU subnode PDA */
+
+	/*
+	 * Support for SN LEDs
+	 */
+	volatile short *led_address;
+	u8 led_state;
+	u8 hb_state;	/* supports blinking heartbeat leds */
+	unsigned int hb_count;
+
+	unsigned int idle_flag;
+
+	volatile unsigned long *bedrock_rev_id;
+	volatile unsigned long *pio_write_status_addr;
+	unsigned long pio_write_status_val;
+	volatile unsigned long *pio_shub_war_cam_addr;
+
+	unsigned long sn_soft_irr[4];
+	unsigned long sn_in_service_ivecs[4];
+	short cnodeid_to_nasid_table[MAX_NUMNODES];
+	int sn_lb_int_war_ticks;
+	int sn_last_irq;
+	int sn_first_irq;
+} pda_t;
+
+
+#define CACHE_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
+
+/*
+ * PDA
+ * Per-cpu private data area for each cpu. The PDA is located immediately after
+ * the IA64 cpu_data area. A full page is allocated for the cpu_data area for each
+ * cpu but only a small amount of the page is actually used. We put the SNIA PDA
+ * in the same page as the cpu_data area. Note that there is a check in the setup
+ * code to verify that we don't overflow the page.
+ *
+ * Seems like we should cache-line align the pda so that any changes in the
+ * size of the cpu_data area don't change cache layout. Should we align to a 32,
+ * 64, 128 or 512 byte boundary? Each has merits. For now, pick 128 but this
+ * should be revisited later.
+ */
+DECLARE_PER_CPU(struct pda_s, pda_percpu);
+
+#define pda (&__ia64_per_cpu_var(pda_percpu))
+
+#define pdacpu(cpu) (&per_cpu(pda_percpu, cpu))
+
+#endif /* _ASM_IA64_SN_PDA_H */
diff --git a/include/asm-ia64/sn/rw_mmr.h b/include/asm-ia64/sn/rw_mmr.h
new file mode 100644
index 000000000000..f40fd1a5510d
--- /dev/null
+++ b/include/asm-ia64/sn/rw_mmr.h
@@ -0,0 +1,74 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2002-2004 Silicon Graphics, Inc. All Rights Reserved.
+ */
+#ifndef _ASM_IA64_SN_RW_MMR_H
+#define _ASM_IA64_SN_RW_MMR_H
+
+
+/*
+ * This file contains macros used to access MMR registers via
+ * uncached physical addresses.
+ *	pio_phys_read_mmr  - read an MMR
+ *	pio_phys_write_mmr - write an MMR
+ *	pio_atomic_phys_write_mmrs - atomically write 1 or 2 MMRs with psr.ic=0
+ *		Second MMR will be skipped if address is NULL
+ *
+ * Addresses passed to these routines should be uncached physical addresses,
+ * i.e., 0x80000....
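+ *
+ * A usage sketch (illustrative; GLOBAL_MMR_PHYS_ADDR and SH_IPI_INT come
+ * from asm/sn/addrs.h and asm/sn/shub_mmr.h respectively):
+ *
+ *	volatile long *mmr = (volatile long *)
+ *		GLOBAL_MMR_PHYS_ADDR(nasid, SH_IPI_INT);
+ *	pio_phys_write_mmr(mmr, val);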
+ */
+
+
+extern inline long
+pio_phys_read_mmr(volatile long *mmr)
+{
+	long val;
+	asm volatile
+	    ("mov r2=psr;;"
+	     "rsm psr.i | psr.dt;;"
+	     "srlz.i;;"
+	     "ld8.acq %0=[%1];;"
+	     "mov psr.l=r2;;"
+	     "srlz.i;;"
+	     : "=r"(val)
+	     : "r"(mmr)
+	     : "r2");
+	return val;
+}
+
+
+
+extern inline void
+pio_phys_write_mmr(volatile long *mmr, long val)
+{
+	asm volatile
+	    ("mov r2=psr;;"
+	     "rsm psr.i | psr.dt;;"
+	     "srlz.i;;"
+	     "st8.rel [%0]=%1;;"
+	     "mov psr.l=r2;;"
+	     "srlz.i;;"
+	     :: "r"(mmr), "r"(val)
+	     : "r2", "memory");
+}
+
+extern inline void
+pio_atomic_phys_write_mmrs(volatile long *mmr1, long val1, volatile long *mmr2, long val2)
+{
+	asm volatile
+	    ("mov r2=psr;;"
+	     "rsm psr.i | psr.dt | psr.ic;;"
+	     "cmp.ne p9,p0=%2,r0;"
+	     "srlz.i;;"
+	     "st8.rel [%0]=%1;"
+	     "(p9) st8.rel [%2]=%3;;"
+	     "mov psr.l=r2;;"
+	     "srlz.i;;"
+	     :: "r"(mmr1), "r"(val1), "r"(mmr2), "r"(val2)
+	     : "p9", "r2", "memory");
+}
+
+#endif /* _ASM_IA64_SN_RW_MMR_H */
diff --git a/include/asm-ia64/sn/shub_mmr.h b/include/asm-ia64/sn/shub_mmr.h
new file mode 100644
index 000000000000..5c2fcf13d5ce
--- /dev/null
+++ b/include/asm-ia64/sn/shub_mmr.h
@@ -0,0 +1,441 @@
+/*
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2001-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_IA64_SN_SHUB_MMR_H
+#define _ASM_IA64_SN_SHUB_MMR_H
+
+/* ==================================================================== */
+/*                        Register "SH_IPI_INT"                         */
+/*               SHub Inter-Processor Interrupt Registers               */
+/* ==================================================================== */
+#define SH1_IPI_INT 0x0000000110000380
+#define SH2_IPI_INT 0x0000000010000380
+
+/* SH_IPI_INT_TYPE */
+/* Description: Type of Interrupt: 0=INT, 2=PMI, 4=NMI, 5=INIT */
+#define SH_IPI_INT_TYPE_SHFT 0
+#define SH_IPI_INT_TYPE_MASK 0x0000000000000007
+
+/* SH_IPI_INT_AGT */
+/* Description: Agent, must be 0 for SHub */
+#define SH_IPI_INT_AGT_SHFT 3
+#define SH_IPI_INT_AGT_MASK 0x0000000000000008
+
+/* SH_IPI_INT_PID */
+/* Description: Processor ID, same setting as on targeted McKinley */
+#define SH_IPI_INT_PID_SHFT 4
+#define SH_IPI_INT_PID_MASK 0x00000000000ffff0
+
+/* SH_IPI_INT_BASE */
+/* Description: Optional interrupt vector area, 2MB aligned */
+#define SH_IPI_INT_BASE_SHFT 21
+#define SH_IPI_INT_BASE_MASK 0x0003ffffffe00000
+
+/* SH_IPI_INT_IDX */
+/* Description: Targeted McKinley interrupt vector */
+#define SH_IPI_INT_IDX_SHFT 52
+#define SH_IPI_INT_IDX_MASK 0x0ff0000000000000
+
+/* SH_IPI_INT_SEND */
+/* Description: Send Interrupt Message to PI. This generates a pulse */
+#define SH_IPI_INT_SEND_SHFT 63
+#define SH_IPI_INT_SEND_MASK 0x8000000000000000
+
+/* ==================================================================== */
+/*                     Register "SH_EVENT_OCCURRED"                     */
+/*                    SHub Interrupt Event Occurred                     */
+/* ==================================================================== */
+#define SH1_EVENT_OCCURRED 0x0000000110010000
+#define SH1_EVENT_OCCURRED_ALIAS 0x0000000110010008
+#define SH2_EVENT_OCCURRED 0x0000000010010000
+#define SH2_EVENT_OCCURRED_ALIAS 0x0000000010010008
+
+/* ==================================================================== */
+/*                     Register "SH_PI_CAM_CONTROL"                     */
+/*                      CRB CAM MMR Access Control                      */
+/* ==================================================================== */
+#define SH1_PI_CAM_CONTROL 0x0000000120050300
+
+/* ==================================================================== */
+/*                        Register "SH_SHUB_ID"                         */
+/*                            SHub ID Number                            */
+/* ==================================================================== */
+#define SH1_SHUB_ID 0x0000000110060580
+#define SH1_SHUB_ID_REVISION_SHFT 28
+#define SH1_SHUB_ID_REVISION_MASK 0x00000000f0000000
+
+/* ==================================================================== */
+/*                          Register "SH_RTC"                           */
+/*                           Real-time Clock                            */
+/* ==================================================================== */
+#define SH1_RTC 0x00000001101c0000
+#define SH2_RTC 0x00000002101c0000
+#define SH_RTC_MASK 0x007fffffffffffff
+
+/* ==================================================================== */
+/*                 Register "SH_PIO_WRITE_STATUS_0|1"                   */
+/*                    PIO Write Status for CPU 0 & 1                    */
+/* ==================================================================== */
+#define SH1_PIO_WRITE_STATUS_0 0x0000000120070200
+#define SH1_PIO_WRITE_STATUS_1 0x0000000120070280
+#define SH2_PIO_WRITE_STATUS_0 0x0000000020070200
+#define SH2_PIO_WRITE_STATUS_1 0x0000000020070280
+#define SH2_PIO_WRITE_STATUS_2 0x0000000020070300
+#define SH2_PIO_WRITE_STATUS_3 0x0000000020070380
+
+/* SH_PIO_WRITE_STATUS_0_WRITE_DEADLOCK */
+/* Description: Deadlock response detected */
+#define SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_SHFT 1
+#define SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK 0x0000000000000002
+
+/* SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT */
+/* Description: Count of currently pending PIO writes */
+#define SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_SHFT 56
+#define SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK 0x3f00000000000000
+
+/* ==================================================================== */
+/*                Register "SH_PIO_WRITE_STATUS_0_ALIAS"                */
+/* ==================================================================== */
+#define SH1_PIO_WRITE_STATUS_0_ALIAS 0x0000000120070208
+#define SH2_PIO_WRITE_STATUS_0_ALIAS 0x0000000020070208
+
+/* ==================================================================== */
+/*                     Register "SH_EVENT_OCCURRED"                     */
+/*                    SHub Interrupt Event Occurred                     */
+/* ==================================================================== */
+/* SH_EVENT_OCCURRED_UART_INT */
+/* Description: Pending Junk Bus UART Interrupt */
+#define SH_EVENT_OCCURRED_UART_INT_SHFT 20
+#define SH_EVENT_OCCURRED_UART_INT_MASK 0x0000000000100000
+
+/* SH_EVENT_OCCURRED_IPI_INT */
+/* Description: Pending IPI Interrupt */
+#define SH_EVENT_OCCURRED_IPI_INT_SHFT 28
+#define SH_EVENT_OCCURRED_IPI_INT_MASK 0x0000000010000000
+
+/* SH_EVENT_OCCURRED_II_INT0 */
+/* Description: Pending II 0 Interrupt */
+#define SH_EVENT_OCCURRED_II_INT0_SHFT 29
+#define SH_EVENT_OCCURRED_II_INT0_MASK 0x0000000020000000
+
+/* SH_EVENT_OCCURRED_II_INT1 */
+/* Description: Pending II 1 Interrupt */
+#define SH_EVENT_OCCURRED_II_INT1_SHFT 30
+#define SH_EVENT_OCCURRED_II_INT1_MASK 0x0000000040000000
+
+/* ==================================================================== */
+/*                               LEDS                                   */
+/* ==================================================================== */
+#define SH1_REAL_JUNK_BUS_LED0 0x7fed00000UL
+#define SH1_REAL_JUNK_BUS_LED1 0x7fed10000UL
+#define SH1_REAL_JUNK_BUS_LED2 0x7fed20000UL
+#define SH1_REAL_JUNK_BUS_LED3 0x7fed30000UL
+
+#define SH2_REAL_JUNK_BUS_LED0 0xf0000000UL
+#define SH2_REAL_JUNK_BUS_LED1 0xf0010000UL
+#define SH2_REAL_JUNK_BUS_LED2 0xf0020000UL
+#define SH2_REAL_JUNK_BUS_LED3 0xf0030000UL
+
+/* ==================================================================== */
+/*                         Register "SH1_PTC_0"                         */
+/*       Purge Translation Cache Message Configuration Information      */
+/* ==================================================================== */
+#define SH1_PTC_0 0x00000001101a0000
+
+/* SH1_PTC_0_A */
+/* Description: Type */
+#define SH1_PTC_0_A_SHFT 0
+
+/* SH1_PTC_0_PS */
+/* Description: Page Size */
+#define SH1_PTC_0_PS_SHFT 2
+
+/* SH1_PTC_0_RID */
+/* Description: Region ID */
+#define SH1_PTC_0_RID_SHFT 8
+
+/* SH1_PTC_0_START */
+/* Description: Start */
+#define SH1_PTC_0_START_SHFT 63
+
+/* ==================================================================== */
+/*                         Register "SH1_PTC_1"                         */
+/*       Purge Translation Cache Message Configuration Information      */
+/* ==================================================================== */
+#define SH1_PTC_1 0x00000001101a0080
+
+/* SH1_PTC_1_START */
+/* Description: PTC_1 Start */
+#define SH1_PTC_1_START_SHFT 63
+
+
+/* ==================================================================== */
+/*                         Register "SH2_PTC"                           */
+/*       Purge Translation Cache Message Configuration Information      */
+/* ==================================================================== */
+#define SH2_PTC 0x0000000170000000
+
+/* SH2_PTC_A */
+/* Description: Type */
+#define SH2_PTC_A_SHFT 0
+
+/* SH2_PTC_PS */
+/* Description: Page Size */
+#define SH2_PTC_PS_SHFT 2
+
+/* SH2_PTC_RID */
+/* Description: Region ID */
+#define SH2_PTC_RID_SHFT 4
+
+/* SH2_PTC_START */
+/* Description: Start */
+#define SH2_PTC_START_SHFT 63
+
+/* SH2_PTC_ADDR_RID */
+/* Description: Region ID */
+#define SH2_PTC_ADDR_SHFT 4
+#define SH2_PTC_ADDR_MASK 0x1ffffffffffff000
+
+/* ==================================================================== */
+/*                    Register "SH_RTC1_INT_CONFIG"                     */
+/*                 SHub RTC 1 Interrupt Config Registers                */
+/* ==================================================================== */
+
+#define SH1_RTC1_INT_CONFIG 0x0000000110001480
+#define SH2_RTC1_INT_CONFIG 0x0000000010001480
+#define SH_RTC1_INT_CONFIG_MASK 0x0ff3ffffffefffff
+#define SH_RTC1_INT_CONFIG_INIT 0x0000000000000000
+
+/* SH_RTC1_INT_CONFIG_TYPE */
+/* Description: Type of Interrupt: 0=INT, 2=PMI, 4=NMI, 5=INIT */
+#define SH_RTC1_INT_CONFIG_TYPE_SHFT 0
+#define SH_RTC1_INT_CONFIG_TYPE_MASK 0x0000000000000007
+
+/* SH_RTC1_INT_CONFIG_AGT */
+/* Description: Agent, must be 0 for SHub */
+#define SH_RTC1_INT_CONFIG_AGT_SHFT 3
+#define SH_RTC1_INT_CONFIG_AGT_MASK 0x0000000000000008
+
+/* SH_RTC1_INT_CONFIG_PID */
+/* Description: Processor ID, same setting as on targeted McKinley */
+#define SH_RTC1_INT_CONFIG_PID_SHFT 4
+#define SH_RTC1_INT_CONFIG_PID_MASK 0x00000000000ffff0
+
+/* SH_RTC1_INT_CONFIG_BASE */
+/* Description: Optional interrupt vector area, 2MB aligned */
+#define SH_RTC1_INT_CONFIG_BASE_SHFT 21
+#define SH_RTC1_INT_CONFIG_BASE_MASK 0x0003ffffffe00000
+
+/* SH_RTC1_INT_CONFIG_IDX */
+/* Description: Targeted McKinley interrupt vector */
+#define SH_RTC1_INT_CONFIG_IDX_SHFT 52
+#define SH_RTC1_INT_CONFIG_IDX_MASK 0x0ff0000000000000
+
+/* ==================================================================== */
+/*                    Register "SH_RTC1_INT_ENABLE"                     */
+/*                 SHub RTC 1 Interrupt Enable Registers                */
+/* ==================================================================== */
+
+#define SH1_RTC1_INT_ENABLE 0x0000000110001500
+#define SH2_RTC1_INT_ENABLE 0x0000000010001500
+#define SH_RTC1_INT_ENABLE_MASK 0x0000000000000001
+#define SH_RTC1_INT_ENABLE_INIT 0x0000000000000000
+
+/* SH_RTC1_INT_ENABLE_RTC1_ENABLE */
+/* Description: Enable RTC 1 Interrupt */
+#define SH_RTC1_INT_ENABLE_RTC1_ENABLE_SHFT 0
+#define SH_RTC1_INT_ENABLE_RTC1_ENABLE_MASK 0x0000000000000001
+
+/* 
==================================================================== */ +/* Register "SH_RTC2_INT_CONFIG" */ +/* SHub RTC 2 Interrupt Config Registers */ +/* ==================================================================== */ + +#define SH1_RTC2_INT_CONFIG 0x0000000110001580 +#define SH2_RTC2_INT_CONFIG 0x0000000010001580 +#define SH_RTC2_INT_CONFIG_MASK 0x0ff3ffffffefffff +#define SH_RTC2_INT_CONFIG_INIT 0x0000000000000000 + +/* SH_RTC2_INT_CONFIG_TYPE */ +/* Description: Type of Interrupt: 0=INT, 2=PMI, 4=NMI, 5=INIT */ +#define SH_RTC2_INT_CONFIG_TYPE_SHFT 0 +#define SH_RTC2_INT_CONFIG_TYPE_MASK 0x0000000000000007 + +/* SH_RTC2_INT_CONFIG_AGT */ +/* Description: Agent, must be 0 for SHub */ +#define SH_RTC2_INT_CONFIG_AGT_SHFT 3 +#define SH_RTC2_INT_CONFIG_AGT_MASK 0x0000000000000008 + +/* SH_RTC2_INT_CONFIG_PID */ +/* Description: Processor ID, same setting as on targeted McKinley */ +#define SH_RTC2_INT_CONFIG_PID_SHFT 4 +#define SH_RTC2_INT_CONFIG_PID_MASK 0x00000000000ffff0 + +/* SH_RTC2_INT_CONFIG_BASE */ +/* Description: Optional interrupt vector area, 2MB aligned */ +#define SH_RTC2_INT_CONFIG_BASE_SHFT 21 +#define SH_RTC2_INT_CONFIG_BASE_MASK 0x0003ffffffe00000 + +/* SH_RTC2_INT_CONFIG_IDX */ +/* Description: Targeted McKinley interrupt vector */ +#define SH_RTC2_INT_CONFIG_IDX_SHFT 52 +#define SH_RTC2_INT_CONFIG_IDX_MASK 0x0ff0000000000000 + +/* ==================================================================== */ +/* Register "SH_RTC2_INT_ENABLE" */ +/* SHub RTC 2 Interrupt Enable Registers */ +/* ==================================================================== */ + +#define SH1_RTC2_INT_ENABLE 0x0000000110001600 +#define SH2_RTC2_INT_ENABLE 0x0000000010001600 +#define SH_RTC2_INT_ENABLE_MASK 0x0000000000000001 +#define SH_RTC2_INT_ENABLE_INIT 0x0000000000000000 + +/* SH_RTC2_INT_ENABLE_RTC2_ENABLE */ +/* Description: Enable RTC 2 Interrupt */ +#define SH_RTC2_INT_ENABLE_RTC2_ENABLE_SHFT 0 +#define SH_RTC2_INT_ENABLE_RTC2_ENABLE_MASK 0x0000000000000001 + +/* ==================================================================== */ +/* Register "SH_RTC3_INT_CONFIG" */ +/* SHub RTC 3 Interrupt Config Registers */ +/* ==================================================================== */ + +#define SH1_RTC3_INT_CONFIG 0x0000000110001680 +#define SH2_RTC3_INT_CONFIG 0x0000000010001680 +#define SH_RTC3_INT_CONFIG_MASK 0x0ff3ffffffefffff +#define SH_RTC3_INT_CONFIG_INIT 0x0000000000000000 + +/* SH_RTC3_INT_CONFIG_TYPE */ +/* Description: Type of Interrupt: 0=INT, 2=PMI, 4=NMI, 5=INIT */ +#define SH_RTC3_INT_CONFIG_TYPE_SHFT 0 +#define SH_RTC3_INT_CONFIG_TYPE_MASK 0x0000000000000007 + +/* SH_RTC3_INT_CONFIG_AGT */ +/* Description: Agent, must be 0 for SHub */ +#define SH_RTC3_INT_CONFIG_AGT_SHFT 3 +#define SH_RTC3_INT_CONFIG_AGT_MASK 0x0000000000000008 + +/* SH_RTC3_INT_CONFIG_PID */ +/* Description: Processor ID, same setting as on targeted McKinley */ +#define SH_RTC3_INT_CONFIG_PID_SHFT 4 +#define SH_RTC3_INT_CONFIG_PID_MASK 0x00000000000ffff0 + +/* SH_RTC3_INT_CONFIG_BASE */ +/* Description: Optional interrupt vector area, 2MB aligned */ +#define SH_RTC3_INT_CONFIG_BASE_SHFT 21 +#define SH_RTC3_INT_CONFIG_BASE_MASK 0x0003ffffffe00000 + +/* SH_RTC3_INT_CONFIG_IDX */ +/* Description: Targeted McKinley interrupt vector */ +#define SH_RTC3_INT_CONFIG_IDX_SHFT 52 +#define SH_RTC3_INT_CONFIG_IDX_MASK 0x0ff0000000000000 + +/* ==================================================================== */ +/* Register "SH_RTC3_INT_ENABLE" */ +/* SHub RTC 3 Interrupt Enable Registers */ 
+/* ==================================================================== */
+
+#define SH1_RTC3_INT_ENABLE 0x0000000110001700
+#define SH2_RTC3_INT_ENABLE 0x0000000010001700
+#define SH_RTC3_INT_ENABLE_MASK 0x0000000000000001
+#define SH_RTC3_INT_ENABLE_INIT 0x0000000000000000
+
+/* SH_RTC3_INT_ENABLE_RTC3_ENABLE */
+/* Description: Enable RTC 3 Interrupt */
+#define SH_RTC3_INT_ENABLE_RTC3_ENABLE_SHFT 0
+#define SH_RTC3_INT_ENABLE_RTC3_ENABLE_MASK 0x0000000000000001
+
+/* SH_EVENT_OCCURRED_RTC1_INT */
+/* Description: Pending RTC 1 Interrupt */
+#define SH_EVENT_OCCURRED_RTC1_INT_SHFT 24
+#define SH_EVENT_OCCURRED_RTC1_INT_MASK 0x0000000001000000
+
+/* SH_EVENT_OCCURRED_RTC2_INT */
+/* Description: Pending RTC 2 Interrupt */
+#define SH_EVENT_OCCURRED_RTC2_INT_SHFT 25
+#define SH_EVENT_OCCURRED_RTC2_INT_MASK 0x0000000002000000
+
+/* SH_EVENT_OCCURRED_RTC3_INT */
+/* Description: Pending RTC 3 Interrupt */
+#define SH_EVENT_OCCURRED_RTC3_INT_SHFT 26
+#define SH_EVENT_OCCURRED_RTC3_INT_MASK 0x0000000004000000
+
+/* ==================================================================== */
+/* Register "SH_INT_CMPB" */
+/* RTC Compare Value for Processor B */
+/* ==================================================================== */
+
+#define SH1_INT_CMPB 0x00000001101b0080
+#define SH2_INT_CMPB 0x00000000101b0080
+#define SH_INT_CMPB_MASK 0x007fffffffffffff
+#define SH_INT_CMPB_INIT 0x0000000000000000
+
+/* SH_INT_CMPB_REAL_TIME_CMPB */
+/* Description: Real Time Clock Compare */
+#define SH_INT_CMPB_REAL_TIME_CMPB_SHFT 0
+#define SH_INT_CMPB_REAL_TIME_CMPB_MASK 0x007fffffffffffff
+
+/* ==================================================================== */
+/* Register "SH_INT_CMPC" */
+/* RTC Compare Value for Processor C */
+/* ==================================================================== */
+
+#define SH1_INT_CMPC 0x00000001101b0100
+#define SH2_INT_CMPC 0x00000000101b0100
+#define SH_INT_CMPC_MASK 0x007fffffffffffff
+#define SH_INT_CMPC_INIT 0x0000000000000000
+
+/* SH_INT_CMPC_REAL_TIME_CMPC */
+/* Description: Real Time Clock Compare */
+#define SH_INT_CMPC_REAL_TIME_CMPC_SHFT 0
+#define SH_INT_CMPC_REAL_TIME_CMPC_MASK 0x007fffffffffffff
+
+/* ==================================================================== */
+/* Register "SH_INT_CMPD" */
+/* RTC Compare Value for Processor D */
+/* ==================================================================== */
+
+#define SH1_INT_CMPD 0x00000001101b0180
+#define SH2_INT_CMPD 0x00000000101b0180
+#define SH_INT_CMPD_MASK 0x007fffffffffffff
+#define SH_INT_CMPD_INIT 0x0000000000000000
+
+/* SH_INT_CMPD_REAL_TIME_CMPD */
+/* Description: Real Time Clock Compare */
+#define SH_INT_CMPD_REAL_TIME_CMPD_SHFT 0
+#define SH_INT_CMPD_REAL_TIME_CMPD_MASK 0x007fffffffffffff
+
+
+/* ==================================================================== */
+/* Some MMRs are functionally identical (or close enough) on both SHUB1 */
+/* and SHUB2 that it makes sense to define a generic name for the MMR. */
+/* It is acceptable to use (for example) SH_IPI_INT to reference the */
+/* IPI MMR. The value of SH_IPI_INT is determined at runtime based */
+/* on the type of the SHUB. Do not use these #defines in performance */
+/* critical code or loops - there is a small performance penalty. */
+/* ==================================================================== */
+#define shubmmr(a,b) (is_shub2() ? a##2_##b : a##1_##b)
+
+#define SH_REAL_JUNK_BUS_LED0 shubmmr(SH, REAL_JUNK_BUS_LED0)
+#define SH_IPI_INT shubmmr(SH, IPI_INT)
+#define SH_EVENT_OCCURRED shubmmr(SH, EVENT_OCCURRED)
+#define SH_EVENT_OCCURRED_ALIAS shubmmr(SH, EVENT_OCCURRED_ALIAS)
+#define SH_RTC shubmmr(SH, RTC)
+#define SH_RTC1_INT_CONFIG shubmmr(SH, RTC1_INT_CONFIG)
+#define SH_RTC1_INT_ENABLE shubmmr(SH, RTC1_INT_ENABLE)
+#define SH_RTC2_INT_CONFIG shubmmr(SH, RTC2_INT_CONFIG)
+#define SH_RTC2_INT_ENABLE shubmmr(SH, RTC2_INT_ENABLE)
+#define SH_RTC3_INT_CONFIG shubmmr(SH, RTC3_INT_CONFIG)
+#define SH_RTC3_INT_ENABLE shubmmr(SH, RTC3_INT_ENABLE)
+#define SH_INT_CMPB shubmmr(SH, INT_CMPB)
+#define SH_INT_CMPC shubmmr(SH, INT_CMPC)
+#define SH_INT_CMPD shubmmr(SH, INT_CMPD)
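+
+/*
+ * Illustrative sketch (editor's addition, not in the original header):
+ * SH_IPI_INT expands to (is_shub2() ? SH2_IPI_INT : SH1_IPI_INT), so
+ *
+ *	unsigned long ipi_mmr = SH_IPI_INT;
+ *
+ * selects the SHUB2 or SHUB1 address at runtime. The hidden is_shub2()
+ * test on every use is the small performance penalty the comment above
+ * warns about, which is why hot paths should use the SH1_/SH2_ names
+ * directly.
+ */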
+
+#endif /* _ASM_IA64_SN_SHUB_MMR_H */
diff --git a/include/asm-ia64/sn/shubio.h b/include/asm-ia64/sn/shubio.h
new file mode 100644
index 000000000000..fbd880e6bb96
--- /dev/null
+++ b/include/asm-ia64/sn/shubio.h
@@ -0,0 +1,3476 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#ifndef _ASM_IA64_SN_SHUBIO_H
+#define _ASM_IA64_SN_SHUBIO_H
+
+#define HUB_WIDGET_ID_MAX 0xf
+#define IIO_NUM_ITTES 7
+#define HUB_NUM_BIG_WINDOW (IIO_NUM_ITTES - 1)
+
+#define IIO_WID 0x00400000 /* Crosstalk Widget Identification */
+ /* This register is also accessible from
+ * Crosstalk at address 0x0. */
+#define IIO_WSTAT 0x00400008 /* Crosstalk Widget Status */
+#define IIO_WCR 0x00400020 /* Crosstalk Widget Control Register */
+#define IIO_ILAPR 0x00400100 /* IO Local Access Protection Register */
+#define IIO_ILAPO 0x00400108 /* IO Local Access Protection Override */
+#define IIO_IOWA 0x00400110 /* IO Outbound Widget Access */
+#define IIO_IIWA 0x00400118 /* IO Inbound Widget Access */
+#define IIO_IIDEM 0x00400120 /* IO Inbound Device Error Mask */
+#define IIO_ILCSR 0x00400128 /* IO LLP Control and Status Register */
+#define IIO_ILLR 0x00400130 /* IO LLP Log Register */
+#define IIO_IIDSR 0x00400138 /* IO Interrupt Destination */
+
+#define IIO_IGFX0 0x00400140 /* IO Graphics Node-Widget Map 0 */
+#define IIO_IGFX1 0x00400148 /* IO Graphics Node-Widget Map 1 */
+
+#define IIO_ISCR0 0x00400150 /* IO Scratch Register 0 */
+#define IIO_ISCR1 0x00400158 /* IO Scratch Register 1 */
+
+#define IIO_ITTE1 0x00400160 /* IO Translation Table Entry 1 */
+#define IIO_ITTE2 0x00400168 /* IO Translation Table Entry 2 */
+#define IIO_ITTE3 0x00400170 /* IO Translation Table Entry 3 */
+#define IIO_ITTE4 0x00400178 /* IO Translation Table Entry 4 */
+#define IIO_ITTE5 0x00400180 /* IO Translation Table Entry 5 */
+#define IIO_ITTE6 0x00400188 /* IO Translation Table Entry 6 */
+#define IIO_ITTE7 0x00400190 /* IO Translation Table Entry 7 */
+
+#define IIO_IPRB0 0x00400198 /* IO PRB Entry 0 */
+#define IIO_IPRB8 0x004001A0 /* IO PRB Entry 8 */
+#define IIO_IPRB9 0x004001A8 /* IO PRB Entry 9 */
+#define IIO_IPRBA 0x004001B0 /* IO PRB Entry A */
+#define IIO_IPRBB 0x004001B8 /* IO PRB Entry B */
+#define IIO_IPRBC 0x004001C0 /* IO PRB Entry C */
+#define IIO_IPRBD 0x004001C8 /* IO PRB Entry D */
+#define IIO_IPRBE 0x004001D0 /* IO PRB Entry E */
+#define IIO_IPRBF 0x004001D8 /* IO PRB Entry F */
+
+#define IIO_IXCC 0x004001E0 /* IO Crosstalk Credit Count Timeout */
+#define IIO_IMEM 0x004001E8 /* IO Miscellaneous Error Mask */
+#define IIO_IXTT 0x004001F0 /* IO Crosstalk Timeout Threshold */
+#define IIO_IECLR 0x004001F8 /* IO Error Clear Register */
+#define IIO_IBCR 0x00400200 /* IO BTE Control Register */
+
+#define IIO_IXSM 0x00400208 /* IO Crosstalk Spurious Message */
+#define IIO_IXSS 0x00400210 /* IO Crosstalk Spurious Sideband */
+
+#define IIO_ILCT 0x00400218 /* IO LLP Channel Test */
+
+#define IIO_IIEPH1 0x00400220 /* IO Incoming Error Packet Header, Part 1 */
+#define IIO_IIEPH2 0x00400228 /* IO Incoming Error Packet Header, Part 2 */
+
+
+#define IIO_ISLAPR 0x00400230 /* IO SXB Local Access Protection Register */
+#define IIO_ISLAPO 0x00400238 /* IO SXB Local Access Protection Override */
+
+#define IIO_IWI 0x00400240 /* IO Wrapper Interrupt Register */
+#define IIO_IWEL 0x00400248 /* IO Wrapper Error Log Register */
+#define IIO_IWC 0x00400250 /* IO Wrapper Control Register */
+#define IIO_IWS 0x00400258 /* IO Wrapper Status Register */
+#define IIO_IWEIM 0x00400260 /* IO Wrapper Error Interrupt Masking Register */
+
+#define IIO_IPCA 0x00400300 /* IO PRB Counter Adjust */
+
+#define IIO_IPRTE0_A 0x00400308 /* IO PIO Read Address Table Entry 0, Part A */
+#define IIO_IPRTE1_A 0x00400310 /* IO PIO Read Address Table Entry 1, Part A */
+#define IIO_IPRTE2_A 0x00400318 /* IO PIO Read Address Table Entry 2, Part A */
+#define IIO_IPRTE3_A 0x00400320 /* IO PIO Read Address Table Entry 3, Part A */
+#define IIO_IPRTE4_A 0x00400328 /* IO PIO Read Address Table Entry 4, Part A */
+#define IIO_IPRTE5_A 0x00400330 /* IO PIO Read Address Table Entry 5, Part A */
+#define IIO_IPRTE6_A 0x00400338 /* IO PIO Read Address Table Entry 6, Part A */
+#define IIO_IPRTE7_A 0x00400340 /* IO PIO Read Address Table Entry 7, Part A */
+
+#define IIO_IPRTE0_B 0x00400348 /* IO PIO Read Address Table Entry 0, Part B */
+#define IIO_IPRTE1_B 0x00400350 /* IO PIO Read Address Table Entry 1, Part B */
+#define IIO_IPRTE2_B 0x00400358 /* IO PIO Read Address Table Entry 2, Part B */
+#define IIO_IPRTE3_B 0x00400360 /* IO PIO Read Address Table Entry 3, Part B */
+#define IIO_IPRTE4_B 0x00400368 /* IO PIO Read Address Table Entry 4, Part B */
+#define IIO_IPRTE5_B 0x00400370 /* IO PIO Read Address Table Entry 5, Part B */
+#define IIO_IPRTE6_B 0x00400378 /* IO PIO Read Address Table Entry 6, Part B */
+#define IIO_IPRTE7_B 0x00400380 /* IO PIO Read Address Table Entry 7, Part B */
+
+#define IIO_IPDR 0x00400388 /* IO PIO Deallocation Register */
+#define IIO_ICDR 0x00400390 /* IO CRB Entry Deallocation Register */
+#define IIO_IFDR 0x00400398 /* IO IOQ FIFO Depth Register */
+#define IIO_IIAP 0x004003A0 /* IO IIQ Arbitration Parameters */
+#define IIO_ICMR 0x004003A8 /* IO CRB Management Register */
+#define IIO_ICCR 0x004003B0 /* IO CRB Control Register */
+#define IIO_ICTO 0x004003B8 /* IO CRB Timeout */
+#define IIO_ICTP 0x004003C0 /* IO CRB Timeout Prescalar */
+
+#define IIO_ICRB0_A 0x00400400 /* IO CRB Entry 0_A */
+#define IIO_ICRB0_B 0x00400408 /* IO CRB Entry 0_B */
+#define IIO_ICRB0_C 0x00400410 /* IO CRB Entry 0_C */
+#define IIO_ICRB0_D 0x00400418 /* IO CRB Entry 0_D */
+#define IIO_ICRB0_E 0x00400420 /* IO CRB Entry 0_E */
+
+#define IIO_ICRB1_A 0x00400430 /* IO CRB Entry 1_A */
+#define IIO_ICRB1_B 0x00400438 /* IO CRB Entry 1_B */
+#define IIO_ICRB1_C 0x00400440 /* IO CRB Entry 1_C */
+#define IIO_ICRB1_D 0x00400448 /* IO CRB Entry 1_D */
+#define IIO_ICRB1_E 0x00400450 /* IO CRB Entry 1_E */
+
+#define IIO_ICRB2_A 0x00400460 /* IO CRB Entry 2_A */
+#define IIO_ICRB2_B 0x00400468 /* IO CRB Entry 2_B */
+#define IIO_ICRB2_C 0x00400470 /* IO CRB 
Entry 2_C */ +#define IIO_ICRB2_D 0x00400478 /* IO CRB Entry 2_D */ +#define IIO_ICRB2_E 0x00400480 /* IO CRB Entry 2_E */ + +#define IIO_ICRB3_A 0x00400490 /* IO CRB Entry 3_A */ +#define IIO_ICRB3_B 0x00400498 /* IO CRB Entry 3_B */ +#define IIO_ICRB3_C 0x004004a0 /* IO CRB Entry 3_C */ +#define IIO_ICRB3_D 0x004004a8 /* IO CRB Entry 3_D */ +#define IIO_ICRB3_E 0x004004b0 /* IO CRB Entry 3_E */ + +#define IIO_ICRB4_A 0x004004c0 /* IO CRB Entry 4_A */ +#define IIO_ICRB4_B 0x004004c8 /* IO CRB Entry 4_B */ +#define IIO_ICRB4_C 0x004004d0 /* IO CRB Entry 4_C */ +#define IIO_ICRB4_D 0x004004d8 /* IO CRB Entry 4_D */ +#define IIO_ICRB4_E 0x004004e0 /* IO CRB Entry 4_E */ + +#define IIO_ICRB5_A 0x004004f0 /* IO CRB Entry 5_A */ +#define IIO_ICRB5_B 0x004004f8 /* IO CRB Entry 5_B */ +#define IIO_ICRB5_C 0x00400500 /* IO CRB Entry 5_C */ +#define IIO_ICRB5_D 0x00400508 /* IO CRB Entry 5_D */ +#define IIO_ICRB5_E 0x00400510 /* IO CRB Entry 5_E */ + +#define IIO_ICRB6_A 0x00400520 /* IO CRB Entry 6_A */ +#define IIO_ICRB6_B 0x00400528 /* IO CRB Entry 6_B */ +#define IIO_ICRB6_C 0x00400530 /* IO CRB Entry 6_C */ +#define IIO_ICRB6_D 0x00400538 /* IO CRB Entry 6_D */ +#define IIO_ICRB6_E 0x00400540 /* IO CRB Entry 6_E */ + +#define IIO_ICRB7_A 0x00400550 /* IO CRB Entry 7_A */ +#define IIO_ICRB7_B 0x00400558 /* IO CRB Entry 7_B */ +#define IIO_ICRB7_C 0x00400560 /* IO CRB Entry 7_C */ +#define IIO_ICRB7_D 0x00400568 /* IO CRB Entry 7_D */ +#define IIO_ICRB7_E 0x00400570 /* IO CRB Entry 7_E */ + +#define IIO_ICRB8_A 0x00400580 /* IO CRB Entry 8_A */ +#define IIO_ICRB8_B 0x00400588 /* IO CRB Entry 8_B */ +#define IIO_ICRB8_C 0x00400590 /* IO CRB Entry 8_C */ +#define IIO_ICRB8_D 0x00400598 /* IO CRB Entry 8_D */ +#define IIO_ICRB8_E 0x004005a0 /* IO CRB Entry 8_E */ + +#define IIO_ICRB9_A 0x004005b0 /* IO CRB Entry 9_A */ +#define IIO_ICRB9_B 0x004005b8 /* IO CRB Entry 9_B */ +#define IIO_ICRB9_C 0x004005c0 /* IO CRB Entry 9_C */ +#define IIO_ICRB9_D 0x004005c8 /* IO CRB Entry 9_D */ +#define IIO_ICRB9_E 0x004005d0 /* IO CRB Entry 9_E */ + +#define IIO_ICRBA_A 0x004005e0 /* IO CRB Entry A_A */ +#define IIO_ICRBA_B 0x004005e8 /* IO CRB Entry A_B */ +#define IIO_ICRBA_C 0x004005f0 /* IO CRB Entry A_C */ +#define IIO_ICRBA_D 0x004005f8 /* IO CRB Entry A_D */ +#define IIO_ICRBA_E 0x00400600 /* IO CRB Entry A_E */ + +#define IIO_ICRBB_A 0x00400610 /* IO CRB Entry B_A */ +#define IIO_ICRBB_B 0x00400618 /* IO CRB Entry B_B */ +#define IIO_ICRBB_C 0x00400620 /* IO CRB Entry B_C */ +#define IIO_ICRBB_D 0x00400628 /* IO CRB Entry B_D */ +#define IIO_ICRBB_E 0x00400630 /* IO CRB Entry B_E */ + +#define IIO_ICRBC_A 0x00400640 /* IO CRB Entry C_A */ +#define IIO_ICRBC_B 0x00400648 /* IO CRB Entry C_B */ +#define IIO_ICRBC_C 0x00400650 /* IO CRB Entry C_C */ +#define IIO_ICRBC_D 0x00400658 /* IO CRB Entry C_D */ +#define IIO_ICRBC_E 0x00400660 /* IO CRB Entry C_E */ + +#define IIO_ICRBD_A 0x00400670 /* IO CRB Entry D_A */ +#define IIO_ICRBD_B 0x00400678 /* IO CRB Entry D_B */ +#define IIO_ICRBD_C 0x00400680 /* IO CRB Entry D_C */ +#define IIO_ICRBD_D 0x00400688 /* IO CRB Entry D_D */ +#define IIO_ICRBD_E 0x00400690 /* IO CRB Entry D_E */ + +#define IIO_ICRBE_A 0x004006a0 /* IO CRB Entry E_A */ +#define IIO_ICRBE_B 0x004006a8 /* IO CRB Entry E_B */ +#define IIO_ICRBE_C 0x004006b0 /* IO CRB Entry E_C */ +#define IIO_ICRBE_D 0x004006b8 /* IO CRB Entry E_D */ +#define IIO_ICRBE_E 0x004006c0 /* IO CRB Entry E_E */ + +#define IIO_ICSML 0x00400700 /* IO CRB Spurious Message Low */ +#define IIO_ICSMM 0x00400708 /* IO CRB 
Spurious Message Middle */ +#define IIO_ICSMH 0x00400710 /* IO CRB Spurious Message High */ + +#define IIO_IDBSS 0x00400718 /* IO Debug Submenu Select */ + +#define IIO_IBLS0 0x00410000 /* IO BTE Length Status 0 */ +#define IIO_IBSA0 0x00410008 /* IO BTE Source Address 0 */ +#define IIO_IBDA0 0x00410010 /* IO BTE Destination Address 0 */ +#define IIO_IBCT0 0x00410018 /* IO BTE Control Terminate 0 */ +#define IIO_IBNA0 0x00410020 /* IO BTE Notification Address 0 */ +#define IIO_IBIA0 0x00410028 /* IO BTE Interrupt Address 0 */ +#define IIO_IBLS1 0x00420000 /* IO BTE Length Status 1 */ +#define IIO_IBSA1 0x00420008 /* IO BTE Source Address 1 */ +#define IIO_IBDA1 0x00420010 /* IO BTE Destination Address 1 */ +#define IIO_IBCT1 0x00420018 /* IO BTE Control Terminate 1 */ +#define IIO_IBNA1 0x00420020 /* IO BTE Notification Address 1 */ +#define IIO_IBIA1 0x00420028 /* IO BTE Interrupt Address 1 */ + +#define IIO_IPCR 0x00430000 /* IO Performance Control */ +#define IIO_IPPR 0x00430008 /* IO Performance Profiling */ + + +/************************************************************************ + * * + * Description: This register echoes some information from the * + * LB_REV_ID register. It is available through Crosstalk as described * + * above. The REV_NUM and MFG_NUM fields receive their values from * + * the REVISION and MANUFACTURER fields in the LB_REV_ID register. * + * The PART_NUM field's value is the Crosstalk device ID number that * + * Steve Miller assigned to the SHub chip. * + * * + ************************************************************************/ + +typedef union ii_wid_u { + uint64_t ii_wid_regval; + struct { + uint64_t w_rsvd_1 : 1; + uint64_t w_mfg_num : 11; + uint64_t w_part_num : 16; + uint64_t w_rev_num : 4; + uint64_t w_rsvd : 32; + } ii_wid_fld_s; +} ii_wid_u_t; + + +/************************************************************************ + * * + * The fields in this register are set upon detection of an error * + * and cleared by various mechanisms, as explained in the * + * description. * + * * + ************************************************************************/ + +typedef union ii_wstat_u { + uint64_t ii_wstat_regval; + struct { + uint64_t w_pending : 4; + uint64_t w_xt_crd_to : 1; + uint64_t w_xt_tail_to : 1; + uint64_t w_rsvd_3 : 3; + uint64_t w_tx_mx_rty : 1; + uint64_t w_rsvd_2 : 6; + uint64_t w_llp_tx_cnt : 8; + uint64_t w_rsvd_1 : 8; + uint64_t w_crazy : 1; + uint64_t w_rsvd : 31; + } ii_wstat_fld_s; +} ii_wstat_u_t; + + +/************************************************************************ + * * + * Description: This is a read-write enabled register. It controls * + * various aspects of the Crosstalk flow control. * + * * + ************************************************************************/ + +typedef union ii_wcr_u { + uint64_t ii_wcr_regval; + struct { + uint64_t w_wid : 4; + uint64_t w_tag : 1; + uint64_t w_rsvd_1 : 8; + uint64_t w_dst_crd : 3; + uint64_t w_f_bad_pkt : 1; + uint64_t w_dir_con : 1; + uint64_t w_e_thresh : 5; + uint64_t w_rsvd : 41; + } ii_wcr_fld_s; +} ii_wcr_u_t; + + +/************************************************************************ + * * + * Description: This register's value is a bit vector that guards * + * access to local registers within the II as well as to external * + * Crosstalk widgets. 
Each bit in the register corresponds to a *
+ * particular region in the system; a region consists of one, two or *
+ * four nodes (depending on the value of the REGION_SIZE field in the *
+ * LB_REV_ID register, which is documented in Section 8.3.1.1). The *
+ * protection provided by this register applies to PIO read *
+ * operations as well as PIO write operations. The II will perform a *
+ * PIO read or write request only if the bit for the requestor's *
+ * region is set; otherwise, the II will not perform the requested *
+ * operation and will return an error response. When a PIO read or *
+ * write request targets an external Crosstalk widget, then not only *
+ * must the bit for the requestor's region be set in the ILAPR, but *
+ * also the target widget's bit in the IOWA register must be set in *
+ * order for the II to perform the requested operation; otherwise, *
+ * the II will return an error response. Hence, the protection *
+ * provided by the IOWA register supplements the protection provided *
+ * by the ILAPR for requests that target external Crosstalk widgets. *
+ * This register itself can be accessed only by the nodes whose *
+ * region ID bits are enabled in this same register. It can also be *
+ * accessed through the IAlias space by the local processors. *
+ * The reset value of this register allows access by all nodes. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ilapr_u {
+ uint64_t ii_ilapr_regval;
+ struct {
+ uint64_t i_region : 64;
+ } ii_ilapr_fld_s;
+} ii_ilapr_u_t;
+
+
+
+
+/************************************************************************
+ * *
+ * Description: A write to this register of the 64-bit value *
+ * "SGIrules" in ASCII will cause the bit in the ILAPR register *
+ * corresponding to the region of the requestor to be set (allow *
+ * access). A write of any other value will be ignored. Access *
+ * protection for this register is "SGIrules". *
+ * This register can also be accessed through the IAlias space. *
+ * However, this access will not change the access permissions in the *
+ * ILAPR. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ilapo_u {
+ uint64_t ii_ilapo_regval;
+ struct {
+ uint64_t i_io_ovrride : 64;
+ } ii_ilapo_fld_s;
+} ii_ilapo_u_t;
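+
+/*
+ * Illustrative sketch (editor's addition, not from the SGI manual):
+ * the override value is simply the eight ASCII bytes of "SGIrules"
+ * interpreted as a single 64-bit quantity, e.g.
+ *
+ *	ii_ilapo_u_t ovr;
+ *	memcpy(&ovr.ii_ilapo_regval, "SGIrules", 8);	// byte order is
+ *							// whatever the HW expects
+ *
 * after which writing ovr.ii_ilapo_regval to IIO_ILAPO re-enables the
+ * requestor's region bit in the ILAPR.
+ */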
+
+
+/************************************************************************
+ * *
+ * This register qualifies all the PIO and Graphics writes launched *
+ * from the SHUB towards a widget. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iowa_u {
+ uint64_t ii_iowa_regval;
+ struct {
+ uint64_t i_w0_oac : 1;
+ uint64_t i_rsvd_1 : 7;
+ uint64_t i_wx_oac : 8;
+ uint64_t i_rsvd : 48;
+ } ii_iowa_fld_s;
+} ii_iowa_u_t;
+
+
+/************************************************************************
+ * *
+ * Description: This register qualifies all the requests launched *
+ * from a widget towards the Shub. This register is intended to be *
+ * used by software in case of misbehaving widgets. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iiwa_u {
+ uint64_t ii_iiwa_regval;
+ struct {
+ uint64_t i_w0_iac : 1;
+ uint64_t i_rsvd_1 : 7;
+ uint64_t i_wx_iac : 8;
+ uint64_t i_rsvd : 48;
+ } ii_iiwa_fld_s;
+} ii_iiwa_u_t;
+
+
+
+/************************************************************************
+ * *
+ * Description: This register qualifies all the operations launched *
+ * from a widget towards the SHub. It allows individual access *
+ * control for up to 8 devices per widget. A device refers to an *
+ * individual DMA master hosted by a widget. *
+ * The bits in each field of this register are cleared by the Shub *
+ * upon detection of an error which requires the device to be *
+ * disabled. These fields assume that 0 <= TNUM <= 7 (i.e., *
+ * Bridge-centric Crosstalk). Whether or not a device has access *
+ * rights to this Shub is determined by an AND of the device enable *
+ * bit in the appropriate field of this register and the *
+ * corresponding bit in the Wx_IAC field (for the widget which this *
+ * device belongs to). The bits in this field are set by writing a 1 *
+ * to them. Incoming replies from Crosstalk are not subject to this *
+ * access control mechanism. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iidem_u {
+ uint64_t ii_iidem_regval;
+ struct {
+ uint64_t i_w8_dxs : 8;
+ uint64_t i_w9_dxs : 8;
+ uint64_t i_wa_dxs : 8;
+ uint64_t i_wb_dxs : 8;
+ uint64_t i_wc_dxs : 8;
+ uint64_t i_wd_dxs : 8;
+ uint64_t i_we_dxs : 8;
+ uint64_t i_wf_dxs : 8;
+ } ii_iidem_fld_s;
+} ii_iidem_u_t;
+
+
+/************************************************************************
+ * *
+ * This register contains the various programmable fields necessary *
+ * for controlling and observing the LLP signals. *
+ * *
+ ************************************************************************/
+
+typedef union ii_ilcsr_u {
+ uint64_t ii_ilcsr_regval;
+ struct {
+ uint64_t i_nullto : 6;
+ uint64_t i_rsvd_4 : 2;
+ uint64_t i_wrmrst : 1;
+ uint64_t i_rsvd_3 : 1;
+ uint64_t i_llp_en : 1;
+ uint64_t i_bm8 : 1;
+ uint64_t i_llp_stat : 2;
+ uint64_t i_remote_power : 1;
+ uint64_t i_rsvd_2 : 1;
+ uint64_t i_maxrtry : 10;
+ uint64_t i_d_avail_sel : 2;
+ uint64_t i_rsvd_1 : 4;
+ uint64_t i_maxbrst : 10;
+ uint64_t i_rsvd : 22;
+ } ii_ilcsr_fld_s;
+} ii_ilcsr_u_t;
+
+
+/************************************************************************
+ * *
+ * This is simply a status register that monitors the LLP error *
+ * rate. *
+ * *
+ ************************************************************************/
+
+typedef union ii_illr_u {
+ uint64_t ii_illr_regval;
+ struct {
+ uint64_t i_sn_cnt : 16;
+ uint64_t i_cb_cnt : 16;
+ uint64_t i_rsvd : 32;
+ } ii_illr_fld_s;
+} ii_illr_u_t;
+
+
+/************************************************************************
+ * *
+ * Description: All II-detected non-BTE error interrupts are *
+ * specified via this register. *
+ * NOTE: The PI interrupt register address is hardcoded in the II. If *
+ * PI_ID==0, then the II sends an interrupt request (Duplonet PWRI *
+ * packet) to address offset 0x0180_0090 within the local register *
+ * address space of PI0 on the node specified by the NODE field. If *
+ * PI_ID==1, then the II sends the interrupt request to address *
+ * offset 0x01A0_0090 within the local register address space of PI1 *
+ * on the node specified by the NODE field. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iidsr_u {
+ uint64_t ii_iidsr_regval;
+ struct {
+ uint64_t i_level : 8;
+ uint64_t i_pi_id : 1;
+ uint64_t i_node : 11;
+ uint64_t i_rsvd_3 : 4;
+ uint64_t i_enable : 1;
+ uint64_t i_rsvd_2 : 3;
+ uint64_t i_int_sent : 2;
+ uint64_t i_rsvd_1 : 2;
+ uint64_t i_pi0_forward_int : 1;
+ uint64_t i_pi1_forward_int : 1;
+ uint64_t i_rsvd : 30;
+ } ii_iidsr_fld_s;
+} ii_iidsr_u_t;
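+
+/*
+ * Illustrative sketch (editor's addition): field-level access goes
+ * through the bitfield overlay; "level" and "node" below are
+ * hypothetical values chosen by the caller. Routing II error
+ * interrupts to PI0 on a given node might look like:
+ *
+ *	ii_iidsr_u_t dsr;
+ *	dsr.ii_iidsr_regval = 0;
+ *	dsr.ii_iidsr_fld_s.i_level = level;	// interrupt level
+ *	dsr.ii_iidsr_fld_s.i_pi_id = 0;		// PI0 (offset 0x0180_0090)
+ *	dsr.ii_iidsr_fld_s.i_node = node;	// destination node
+ *	dsr.ii_iidsr_fld_s.i_enable = 1;
+ *
+ * dsr.ii_iidsr_regval is then the value to write to IIO_IIDSR.
+ */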
+
+
+
+/************************************************************************
+ * *
+ * There are two instances of this register. This register is used *
+ * for matching up the incoming responses from the graphics widget to *
+ * the processor that initiated the graphics operation. The *
+ * write-responses are converted to graphics credits and returned to *
+ * the processor so that the processor interface can manage the flow *
+ * control. *
+ * *
+ ************************************************************************/
+
+typedef union ii_igfx0_u {
+ uint64_t ii_igfx0_regval;
+ struct {
+ uint64_t i_w_num : 4;
+ uint64_t i_pi_id : 1;
+ uint64_t i_n_num : 12;
+ uint64_t i_p_num : 1;
+ uint64_t i_rsvd : 46;
+ } ii_igfx0_fld_s;
+} ii_igfx0_u_t;
+
+
+/************************************************************************
+ * *
+ * There are two instances of this register. This register is used *
+ * for matching up the incoming responses from the graphics widget to *
+ * the processor that initiated the graphics operation. The *
+ * write-responses are converted to graphics credits and returned to *
+ * the processor so that the processor interface can manage the flow *
+ * control. *
+ * *
+ ************************************************************************/
+
+typedef union ii_igfx1_u {
+ uint64_t ii_igfx1_regval;
+ struct {
+ uint64_t i_w_num : 4;
+ uint64_t i_pi_id : 1;
+ uint64_t i_n_num : 12;
+ uint64_t i_p_num : 1;
+ uint64_t i_rsvd : 46;
+ } ii_igfx1_fld_s;
+} ii_igfx1_u_t;
+
+
+/************************************************************************
+ * *
+ * There are two instances of this register. These registers are *
+ * used as scratch registers for software use. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iscr0_u {
+ uint64_t ii_iscr0_regval;
+ struct {
+ uint64_t i_scratch : 64;
+ } ii_iscr0_fld_s;
+} ii_iscr0_u_t;
+
+
+
+/************************************************************************
+ * *
+ * There are two instances of this register. These registers are *
+ * used as scratch registers for software use. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iscr1_u {
+ uint64_t ii_iscr1_regval;
+ struct {
+ uint64_t i_scratch : 64;
+ } ii_iscr1_fld_s;
+} ii_iscr1_u_t;
+
+
+/************************************************************************
+ * *
+ * Description: There are seven instances of translation table entry *
+ * registers. Each register maps a Shub Big Window to a 48-bit *
+ * address on Crosstalk. *
+ * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window *
+ * number) are used to select one of these 7 registers. The Widget *
+ * number field is then derived from the W_NUM field for synthesizing *
+ * a Crosstalk packet. The 5 bits of OFFSET are concatenated with *
+ * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] *
+ * are padded with zeros. 
Although the maximum Crosstalk space *
+ * addressable by the Shub is thus the lower 16 GBytes per widget *
+ * (M-mode), only 7/32nds of this space can be accessed. *
+ * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big *
+ * Window number) are used to select one of these 7 registers. The *
+ * Widget number field is then derived from the W_NUM field for *
+ * synthesizing a Crosstalk packet. The 5 bits of OFFSET are *
+ * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP *
+ * field is used as Crosstalk[47], and the remainder of the Crosstalk *
+ * address bits (Crosstalk[46:34]) are always zero. While the maximum *
+ * Crosstalk space addressable by the Shub is thus the lower *
+ * 8 GBytes per widget (N-mode), only 7/32nds of this space can be *
+ * accessed. *
+ * *
+ ************************************************************************/
+
+typedef union ii_itte1_u {
+ uint64_t ii_itte1_regval;
+ struct {
+ uint64_t i_offset : 5;
+ uint64_t i_rsvd_1 : 3;
+ uint64_t i_w_num : 4;
+ uint64_t i_iosp : 1;
+ uint64_t i_rsvd : 51;
+ } ii_itte1_fld_s;
+} ii_itte1_u_t;
+
+
+/************************************************************************
+ * *
+ * Description: There are seven instances of translation table entry *
+ * registers. Each register maps a Shub Big Window to a 48-bit *
+ * address on Crosstalk. *
+ * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window *
+ * number) are used to select one of these 7 registers. The Widget *
+ * number field is then derived from the W_NUM field for synthesizing *
+ * a Crosstalk packet. The 5 bits of OFFSET are concatenated with *
+ * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] *
+ * are padded with zeros. Although the maximum Crosstalk space *
+ * addressable by the Shub is thus the lower 16 GBytes per widget *
+ * (M-mode), only 7/32nds of this space can be accessed. *
+ * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big *
+ * Window number) are used to select one of these 7 registers. The *
+ * Widget number field is then derived from the W_NUM field for *
+ * synthesizing a Crosstalk packet. The 5 bits of OFFSET are *
+ * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP *
+ * field is used as Crosstalk[47], and the remainder of the Crosstalk *
+ * address bits (Crosstalk[46:34]) are always zero. While the maximum *
+ * Crosstalk space addressable by the Shub is thus the lower *
+ * 8 GBytes per widget (N-mode), only 7/32nds of this space can be *
+ * accessed. *
+ * *
+ ************************************************************************/
+
+typedef union ii_itte2_u {
+ uint64_t ii_itte2_regval;
+ struct {
+ uint64_t i_offset : 5;
+ uint64_t i_rsvd_1 : 3;
+ uint64_t i_w_num : 4;
+ uint64_t i_iosp : 1;
+ uint64_t i_rsvd : 51;
+ } ii_itte2_fld_s;
+} ii_itte2_u_t;
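+
+/*
+ * Illustrative sketch (editor's addition): an ITTE value is built from
+ * the widget number, IOSP bit and 5-bit offset described above, where
+ * "widget" and "offset" are hypothetical caller-supplied values:
+ *
+ *	ii_itte1_u_t itte;
+ *	itte.ii_itte1_regval = 0;
+ *	itte.ii_itte1_fld_s.i_offset = offset;	// becomes Crosstalk[33:29]
+ *						// in M-mode
+ *	itte.ii_itte1_fld_s.i_w_num = widget;	// target widget number
+ *	itte.ii_itte1_fld_s.i_iosp = 0;		// Crosstalk[47], N-mode only
+ *
+ * The result would be written to IIO_ITTE1 to set up Big Window 1.
+ */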
+
+
+/************************************************************************
+ * *
+ * Description: There are seven instances of translation table entry *
+ * registers. Each register maps a Shub Big Window to a 48-bit *
+ * address on Crosstalk. *
+ * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window *
+ * number) are used to select one of these 7 registers. The Widget *
+ * number field is then derived from the W_NUM field for synthesizing *
+ * a Crosstalk packet. The 5 bits of OFFSET are concatenated with *
+ * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] *
+ * are padded with zeros. Although the maximum Crosstalk space *
+ * addressable by the Shub is thus the lower 16 GBytes per widget *
+ * (M-mode), only 7/32nds of this space can be accessed. *
+ * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big *
+ * Window number) are used to select one of these 7 registers. The *
+ * Widget number field is then derived from the W_NUM field for *
+ * synthesizing a Crosstalk packet. The 5 bits of OFFSET are *
+ * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP *
+ * field is used as Crosstalk[47], and the remainder of the Crosstalk *
+ * address bits (Crosstalk[46:34]) are always zero. While the maximum *
+ * Crosstalk space addressable by the Shub is thus the lower *
+ * 8 GBytes per widget (N-mode), only 7/32nds of this space can be *
+ * accessed. *
+ * *
+ ************************************************************************/
+
+typedef union ii_itte3_u {
+ uint64_t ii_itte3_regval;
+ struct {
+ uint64_t i_offset : 5;
+ uint64_t i_rsvd_1 : 3;
+ uint64_t i_w_num : 4;
+ uint64_t i_iosp : 1;
+ uint64_t i_rsvd : 51;
+ } ii_itte3_fld_s;
+} ii_itte3_u_t;
+
+
+/************************************************************************
+ * *
+ * Description: There are seven instances of translation table entry *
+ * registers. Each register maps a Shub Big Window to a 48-bit *
+ * address on Crosstalk. *
+ * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window *
+ * number) are used to select one of these 7 registers. The Widget *
+ * number field is then derived from the W_NUM field for synthesizing *
+ * a Crosstalk packet. The 5 bits of OFFSET are concatenated with *
+ * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] *
+ * are padded with zeros. Although the maximum Crosstalk space *
+ * addressable by the Shub is thus the lower 16 GBytes per widget *
+ * (M-mode), only 7/32nds of this space can be accessed. *
+ * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big *
+ * Window number) are used to select one of these 7 registers. The *
+ * Widget number field is then derived from the W_NUM field for *
+ * synthesizing a Crosstalk packet. The 5 bits of OFFSET are *
+ * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP *
+ * field is used as Crosstalk[47], and the remainder of the Crosstalk *
+ * address bits (Crosstalk[46:34]) are always zero. While the maximum *
+ * Crosstalk space addressable by the Shub is thus the lower *
+ * 8 GBytes per widget (N-mode), only 7/32nds of this space can be *
+ * accessed. *
+ * *
+ ************************************************************************/
+
+typedef union ii_itte4_u {
+ uint64_t ii_itte4_regval;
+ struct {
+ uint64_t i_offset : 5;
+ uint64_t i_rsvd_1 : 3;
+ uint64_t i_w_num : 4;
+ uint64_t i_iosp : 1;
+ uint64_t i_rsvd : 51;
+ } ii_itte4_fld_s;
+} ii_itte4_u_t;
+
+
+/************************************************************************
+ * *
+ * Description: There are seven instances of translation table entry *
+ * registers. Each register maps a Shub Big Window to a 48-bit *
+ * address on Crosstalk. *
+ * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window *
+ * number) are used to select one of these 7 registers. The Widget *
+ * number field is then derived from the W_NUM field for synthesizing *
+ * a Crosstalk packet. The 5 bits of OFFSET are concatenated with *
+ * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] *
+ * are padded with zeros. Although the maximum Crosstalk space *
+ * addressable by the Shub is thus the lower 16 GBytes per widget *
+ * (M-mode), only 7/32nds of this space can be accessed. *
+ * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big *
+ * Window number) are used to select one of these 7 registers. The *
+ * Widget number field is then derived from the W_NUM field for *
+ * synthesizing a Crosstalk packet. The 5 bits of OFFSET are *
+ * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP *
+ * field is used as Crosstalk[47], and the remainder of the Crosstalk *
+ * address bits (Crosstalk[46:34]) are always zero. While the maximum *
+ * Crosstalk space addressable by the Shub is thus the lower *
+ * 8 GBytes per widget (N-mode), only 7/32nds of this space can be *
+ * accessed. *
+ * *
+ ************************************************************************/
+
+typedef union ii_itte5_u {
+ uint64_t ii_itte5_regval;
+ struct {
+ uint64_t i_offset : 5;
+ uint64_t i_rsvd_1 : 3;
+ uint64_t i_w_num : 4;
+ uint64_t i_iosp : 1;
+ uint64_t i_rsvd : 51;
+ } ii_itte5_fld_s;
+} ii_itte5_u_t;
+
+
+/************************************************************************
+ * *
+ * Description: There are seven instances of translation table entry *
+ * registers. Each register maps a Shub Big Window to a 48-bit *
+ * address on Crosstalk. *
+ * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window *
+ * number) are used to select one of these 7 registers. The Widget *
+ * number field is then derived from the W_NUM field for synthesizing *
+ * a Crosstalk packet. The 5 bits of OFFSET are concatenated with *
+ * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] *
+ * are padded with zeros. Although the maximum Crosstalk space *
+ * addressable by the Shub is thus the lower 16 GBytes per widget *
+ * (M-mode), only 7/32nds of this space can be accessed. *
+ * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big *
+ * Window number) are used to select one of these 7 registers. The *
+ * Widget number field is then derived from the W_NUM field for *
+ * synthesizing a Crosstalk packet. The 5 bits of OFFSET are *
+ * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP *
+ * field is used as Crosstalk[47], and the remainder of the Crosstalk *
+ * address bits (Crosstalk[46:34]) are always zero. While the maximum *
+ * Crosstalk space addressable by the Shub is thus the lower *
+ * 8 GBytes per widget (N-mode), only 7/32nds of this space can be *
+ * accessed. *
+ * *
+ ************************************************************************/
+
+typedef union ii_itte6_u {
+ uint64_t ii_itte6_regval;
+ struct {
+ uint64_t i_offset : 5;
+ uint64_t i_rsvd_1 : 3;
+ uint64_t i_w_num : 4;
+ uint64_t i_iosp : 1;
+ uint64_t i_rsvd : 51;
+ } ii_itte6_fld_s;
+} ii_itte6_u_t;
+
+
+/************************************************************************
+ * *
+ * Description: There are seven instances of translation table entry *
+ * registers. Each register maps a Shub Big Window to a 48-bit *
+ * address on Crosstalk. *
+ * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window *
+ * number) are used to select one of these 7 registers. The Widget *
+ * number field is then derived from the W_NUM field for synthesizing *
+ * a Crosstalk packet. The 5 bits of OFFSET are concatenated with *
+ * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] *
+ * are padded with zeros. Although the maximum Crosstalk space *
+ * addressable by the Shub is thus the lower 16 GBytes per widget *
+ * (M-mode), only 7/32nds of this space can be accessed. *
+ * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big *
+ * Window number) are used to select one of these 7 registers. The *
+ * Widget number field is then derived from the W_NUM field for *
+ * synthesizing a Crosstalk packet. The 5 bits of OFFSET are *
+ * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP *
+ * field is used as Crosstalk[47], and the remainder of the Crosstalk *
+ * address bits (Crosstalk[46:34]) are always zero. While the maximum *
+ * Crosstalk space addressable by the Shub is thus the lower *
+ * 8 GBytes per widget (N-mode), only 7/32nds of this space can be *
+ * accessed. *
+ * *
+ ************************************************************************/
+
+typedef union ii_itte7_u {
+ uint64_t ii_itte7_regval;
+ struct {
+ uint64_t i_offset : 5;
+ uint64_t i_rsvd_1 : 3;
+ uint64_t i_w_num : 4;
+ uint64_t i_iosp : 1;
+ uint64_t i_rsvd : 51;
+ } ii_itte7_fld_s;
+} ii_itte7_u_t;
+
+
+/************************************************************************
+ * *
+ * Description: There are 9 instances of this register, one per *
+ * actual widget in this implementation of SHub and Crossbow. *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
+ * refers to Crossbow's internal space. *
+ * This register contains the state elements per widget that are *
+ * necessary to manage the PIO flow control on Crosstalk and on the *
+ * Router Network. See the PIO Flow Control chapter for a complete *
+ * description of this register. *
+ * The SPUR_WR bit requires some explanation. When this register is *
+ * written, the new value of the C field is captured in an internal *
+ * register so the hardware can remember what the programmer wrote *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field *
+ * increments above this stored value, which indicates that there *
+ * have been more responses received than requests sent. The SPUR_WR *
+ * bit cannot be cleared until a value is written to the IPRBx *
+ * register; the write will correct the C field and capture its new *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iprb0_u {
+ uint64_t ii_iprb0_regval;
+ struct {
+ uint64_t i_c : 8;
+ uint64_t i_na : 14;
+ uint64_t i_rsvd_2 : 2;
+ uint64_t i_nb : 14;
+ uint64_t i_rsvd_1 : 2;
+ uint64_t i_m : 2;
+ uint64_t i_f : 1;
+ uint64_t i_of_cnt : 5;
+ uint64_t i_error : 1;
+ uint64_t i_rd_to : 1;
+ uint64_t i_spur_wr : 1;
+ uint64_t i_spur_rd : 1;
+ uint64_t i_rsvd : 11;
+ uint64_t i_mult_err : 1;
+ } ii_iprb0_fld_s;
+} ii_iprb0_u_t;
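+
+/*
+ * Illustrative sketch (editor's addition): per the description above,
+ * SPUR_WR is cleared only by rewriting the PRB so the C field is
+ * corrected and recaptured; the bracketed values are placeholders the
+ * caller must supply. For PRB 0:
+ *
+ *	ii_iprb0_u_t prb;
+ *	prb.ii_iprb0_regval = <current IIO_IPRB0 contents>;
+ *	prb.ii_iprb0_fld_s.i_c = <correct credit count>;
+ *	// writing prb.ii_iprb0_regval back to IIO_IPRB0 corrects the C
+ *	// field and clears SPUR_WR; IECLR[E_PRB_0] alone is not enough.
+ */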
+
+
+/************************************************************************
+ * *
+ * Description: There are 9 instances of this register, one per *
+ * actual widget in this implementation of SHub and Crossbow. *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
+ * refers to Crossbow's internal space. *
+ * This register contains the state elements per widget that are *
+ * necessary to manage the PIO flow control on Crosstalk and on the *
+ * Router Network. See the PIO Flow Control chapter for a complete *
+ * description of this register. *
+ * The SPUR_WR bit requires some explanation. When this register is *
+ * written, the new value of the C field is captured in an internal *
+ * register so the hardware can remember what the programmer wrote *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field *
+ * increments above this stored value, which indicates that there *
+ * have been more responses received than requests sent. The SPUR_WR *
+ * bit cannot be cleared until a value is written to the IPRBx *
+ * register; the write will correct the C field and capture its new *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iprb8_u {
+ uint64_t ii_iprb8_regval;
+ struct {
+ uint64_t i_c : 8;
+ uint64_t i_na : 14;
+ uint64_t i_rsvd_2 : 2;
+ uint64_t i_nb : 14;
+ uint64_t i_rsvd_1 : 2;
+ uint64_t i_m : 2;
+ uint64_t i_f : 1;
+ uint64_t i_of_cnt : 5;
+ uint64_t i_error : 1;
+ uint64_t i_rd_to : 1;
+ uint64_t i_spur_wr : 1;
+ uint64_t i_spur_rd : 1;
+ uint64_t i_rsvd : 11;
+ uint64_t i_mult_err : 1;
+ } ii_iprb8_fld_s;
+} ii_iprb8_u_t;
+
+
+/************************************************************************
+ * *
+ * Description: There are 9 instances of this register, one per *
+ * actual widget in this implementation of SHub and Crossbow. *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
+ * refers to Crossbow's internal space. *
+ * This register contains the state elements per widget that are *
+ * necessary to manage the PIO flow control on Crosstalk and on the *
+ * Router Network. See the PIO Flow Control chapter for a complete *
+ * description of this register. *
+ * The SPUR_WR bit requires some explanation. When this register is *
+ * written, the new value of the C field is captured in an internal *
+ * register so the hardware can remember what the programmer wrote *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field *
+ * increments above this stored value, which indicates that there *
+ * have been more responses received than requests sent. The SPUR_WR *
+ * bit cannot be cleared until a value is written to the IPRBx *
+ * register; the write will correct the C field and capture its new *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iprb9_u {
+ uint64_t ii_iprb9_regval;
+ struct {
+ uint64_t i_c : 8;
+ uint64_t i_na : 14;
+ uint64_t i_rsvd_2 : 2;
+ uint64_t i_nb : 14;
+ uint64_t i_rsvd_1 : 2;
+ uint64_t i_m : 2;
+ uint64_t i_f : 1;
+ uint64_t i_of_cnt : 5;
+ uint64_t i_error : 1;
+ uint64_t i_rd_to : 1;
+ uint64_t i_spur_wr : 1;
+ uint64_t i_spur_rd : 1;
+ uint64_t i_rsvd : 11;
+ uint64_t i_mult_err : 1;
+ } ii_iprb9_fld_s;
+} ii_iprb9_u_t;
+
+
+/************************************************************************
+ * *
+ * Description: There are 9 instances of this register, one per *
+ * actual widget in this implementation of SHub and Crossbow. *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
+ * refers to Crossbow's internal space. *
+ * This register contains the state elements per widget that are *
+ * necessary to manage the PIO flow control on Crosstalk and on the *
+ * Router Network. See the PIO Flow Control chapter for a complete *
+ * description of this register. *
+ * The SPUR_WR bit requires some explanation. When this register is *
+ * written, the new value of the C field is captured in an internal *
+ * register so the hardware can remember what the programmer wrote *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field *
+ * increments above this stored value, which indicates that there *
+ * have been more responses received than requests sent. The SPUR_WR *
+ * bit cannot be cleared until a value is written to the IPRBx *
+ * register; the write will correct the C field and capture its new *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iprba_u {
+ uint64_t ii_iprba_regval;
+ struct {
+ uint64_t i_c : 8;
+ uint64_t i_na : 14;
+ uint64_t i_rsvd_2 : 2;
+ uint64_t i_nb : 14;
+ uint64_t i_rsvd_1 : 2;
+ uint64_t i_m : 2;
+ uint64_t i_f : 1;
+ uint64_t i_of_cnt : 5;
+ uint64_t i_error : 1;
+ uint64_t i_rd_to : 1;
+ uint64_t i_spur_wr : 1;
+ uint64_t i_spur_rd : 1;
+ uint64_t i_rsvd : 11;
+ uint64_t i_mult_err : 1;
+ } ii_iprba_fld_s;
+} ii_iprba_u_t;
+
+
+/************************************************************************
+ * *
+ * Description: There are 9 instances of this register, one per *
+ * actual widget in this implementation of SHub and Crossbow. *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
+ * refers to Crossbow's internal space. *
+ * This register contains the state elements per widget that are *
+ * necessary to manage the PIO flow control on Crosstalk and on the *
+ * Router Network. See the PIO Flow Control chapter for a complete *
+ * description of this register. *
+ * The SPUR_WR bit requires some explanation. When this register is *
+ * written, the new value of the C field is captured in an internal *
+ * register so the hardware can remember what the programmer wrote *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field *
+ * increments above this stored value, which indicates that there *
+ * have been more responses received than requests sent. The SPUR_WR *
+ * bit cannot be cleared until a value is written to the IPRBx *
+ * register; the write will correct the C field and capture its new *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iprbb_u {
+ uint64_t ii_iprbb_regval;
+ struct {
+ uint64_t i_c : 8;
+ uint64_t i_na : 14;
+ uint64_t i_rsvd_2 : 2;
+ uint64_t i_nb : 14;
+ uint64_t i_rsvd_1 : 2;
+ uint64_t i_m : 2;
+ uint64_t i_f : 1;
+ uint64_t i_of_cnt : 5;
+ uint64_t i_error : 1;
+ uint64_t i_rd_to : 1;
+ uint64_t i_spur_wr : 1;
+ uint64_t i_spur_rd : 1;
+ uint64_t i_rsvd : 11;
+ uint64_t i_mult_err : 1;
+ } ii_iprbb_fld_s;
+} ii_iprbb_u_t;
+
+
+/************************************************************************
+ * *
+ * Description: There are 9 instances of this register, one per *
+ * actual widget in this implementation of SHub and Crossbow. *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
+ * refers to Crossbow's internal space. *
+ * This register contains the state elements per widget that are *
+ * necessary to manage the PIO flow control on Crosstalk and on the *
+ * Router Network. See the PIO Flow Control chapter for a complete *
+ * description of this register. *
+ * The SPUR_WR bit requires some explanation. When this register is *
+ * written, the new value of the C field is captured in an internal *
+ * register so the hardware can remember what the programmer wrote *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field *
+ * increments above this stored value, which indicates that there *
+ * have been more responses received than requests sent. The SPUR_WR *
+ * bit cannot be cleared until a value is written to the IPRBx *
+ * register; the write will correct the C field and capture its new *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iprbc_u {
+ uint64_t ii_iprbc_regval;
+ struct {
+ uint64_t i_c : 8;
+ uint64_t i_na : 14;
+ uint64_t i_rsvd_2 : 2;
+ uint64_t i_nb : 14;
+ uint64_t i_rsvd_1 : 2;
+ uint64_t i_m : 2;
+ uint64_t i_f : 1;
+ uint64_t i_of_cnt : 5;
+ uint64_t i_error : 1;
+ uint64_t i_rd_to : 1;
+ uint64_t i_spur_wr : 1;
+ uint64_t i_spur_rd : 1;
+ uint64_t i_rsvd : 11;
+ uint64_t i_mult_err : 1;
+ } ii_iprbc_fld_s;
+} ii_iprbc_u_t;
+
+
+/************************************************************************
+ * *
+ * Description: There are 9 instances of this register, one per *
+ * actual widget in this implementation of SHub and Crossbow. *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
+ * refers to Crossbow's internal space. *
+ * This register contains the state elements per widget that are *
+ * necessary to manage the PIO flow control on Crosstalk and on the *
+ * Router Network. See the PIO Flow Control chapter for a complete *
+ * description of this register. *
+ * The SPUR_WR bit requires some explanation. When this register is *
+ * written, the new value of the C field is captured in an internal *
+ * register so the hardware can remember what the programmer wrote *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field *
+ * increments above this stored value, which indicates that there *
+ * have been more responses received than requests sent. The SPUR_WR *
+ * bit cannot be cleared until a value is written to the IPRBx *
+ * register; the write will correct the C field and capture its new *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iprbd_u {
+ uint64_t ii_iprbd_regval;
+ struct {
+ uint64_t i_c : 8;
+ uint64_t i_na : 14;
+ uint64_t i_rsvd_2 : 2;
+ uint64_t i_nb : 14;
+ uint64_t i_rsvd_1 : 2;
+ uint64_t i_m : 2;
+ uint64_t i_f : 1;
+ uint64_t i_of_cnt : 5;
+ uint64_t i_error : 1;
+ uint64_t i_rd_to : 1;
+ uint64_t i_spur_wr : 1;
+ uint64_t i_spur_rd : 1;
+ uint64_t i_rsvd : 11;
+ uint64_t i_mult_err : 1;
+ } ii_iprbd_fld_s;
+} ii_iprbd_u_t;
+
+
+/************************************************************************
+ * *
+ * Description: There are 9 instances of this register, one per *
+ * actual widget in this implementation of SHub and Crossbow. *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
+ * refers to Crossbow's internal space. *
+ * This register contains the state elements per widget that are *
+ * necessary to manage the PIO flow control on Crosstalk and on the *
+ * Router Network. See the PIO Flow Control chapter for a complete *
+ * description of this register. *
+ * The SPUR_WR bit requires some explanation. When this register is *
+ * written, the new value of the C field is captured in an internal *
+ * register so the hardware can remember what the programmer wrote *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field *
+ * increments above this stored value, which indicates that there *
+ * have been more responses received than requests sent. The SPUR_WR *
+ * bit cannot be cleared until a value is written to the IPRBx *
+ * register; the write will correct the C field and capture its new *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
+ * *
+ ************************************************************************/
+
+typedef union ii_iprbe_u {
+ uint64_t ii_iprbe_regval;
+ struct {
+ uint64_t i_c : 8;
+ uint64_t i_na : 14;
+ uint64_t i_rsvd_2 : 2;
+ uint64_t i_nb : 14;
+ uint64_t i_rsvd_1 : 2;
+ uint64_t i_m : 2;
+ uint64_t i_f : 1;
+ uint64_t i_of_cnt : 5;
+ uint64_t i_error : 1;
+ uint64_t i_rd_to : 1;
+ uint64_t i_spur_wr : 1;
+ uint64_t i_spur_rd : 1;
+ uint64_t i_rsvd : 11;
+ uint64_t i_mult_err : 1;
+ } ii_iprbe_fld_s;
+} ii_iprbe_u_t;
+
+
+/************************************************************************
+ * *
+ * Description: There are 9 instances of this register, one per *
+ * actual widget in this implementation of SHub and Crossbow. *
+ * Note: Crossbow only has ports for Widgets 8 through F, widget 0 *
+ * refers to Crossbow's internal space. *
+ * This register contains the state elements per widget that are *
+ * necessary to manage the PIO flow control on Crosstalk and on the *
+ * Router Network. See the PIO Flow Control chapter for a complete *
+ * description of this register. *
+ * The SPUR_WR bit requires some explanation. When this register is *
+ * written, the new value of the C field is captured in an internal *
+ * register so the hardware can remember what the programmer wrote *
+ * into the credit counter. The SPUR_WR bit sets whenever the C field *
+ * increments above this stored value, which indicates that there *
+ * have been more responses received than requests sent. The SPUR_WR *
+ * bit cannot be cleared until a value is written to the IPRBx *
+ * register; the write will correct the C field and capture its new *
+ * value in the internal register. Even if IECLR[E_PRB_x] is set, the *
+ * SPUR_WR bit will persist if IPRBx hasn't yet been written. *
+ * *
+ ************************************************************************/
The SPUR_WR * + * bit cannot be cleared until a value is written to the IPRBx * + * register; the write will correct the C field and capture its new * + * value in the internal register. Even if IECLR[E_PRB_x] is set, the * + * SPUR_WR bit will persist if IPRBx hasn't yet been written. * + * . * + * * + ************************************************************************/ + +typedef union ii_iprbf_u { + uint64_t ii_iprbf_regval; + struct { + uint64_t i_c : 8; + uint64_t i_na : 14; + uint64_t i_rsvd_2 : 2; + uint64_t i_nb : 14; + uint64_t i_rsvd_1 : 2; + uint64_t i_m : 2; + uint64_t i_f : 1; + uint64_t i_of_cnt : 5; + uint64_t i_error : 1; + uint64_t i_rd_to : 1; + uint64_t i_spur_wr : 1; + uint64_t i_spur_rd : 1; + uint64_t i_rsvd : 11; + uint64_t i_mult_err : 1; + } ii_iprbe_fld_s; +} ii_iprbf_u_t; + + +/************************************************************************ + * * + * This register specifies the timeout value to use for monitoring * + * Crosstalk credits which are used outbound to Crosstalk. An * + * internal counter called the Crosstalk Credit Timeout Counter * + * increments every 128 II clocks. The counter starts counting * + * anytime the credit count drops below a threshold, and resets to * + * zero (stops counting) anytime the credit count is at or above the * + * threshold. The threshold is 1 credit in direct connect mode and 2 * + * in Crossbow connect mode. When the internal Crosstalk Credit * + * Timeout Counter reaches the value programmed in this register, a * + * Crosstalk Credit Timeout has occurred. The internal counter is not * + * readable from software, and stops counting at its maximum value, * + * so it cannot cause more than one interrupt. * + * * + ************************************************************************/ + +typedef union ii_ixcc_u { + uint64_t ii_ixcc_regval; + struct { + uint64_t i_time_out : 26; + uint64_t i_rsvd : 38; + } ii_ixcc_fld_s; +} ii_ixcc_u_t; + + +/************************************************************************ + * * + * Description: This register qualifies all the PIO and DMA * + * operations launched from widget 0 towards the SHub. In * + * addition, it also qualifies accesses by the BTE streams. * + * The bits in each field of this register are cleared by the SHub * + * upon detection of an error which requires widget 0 or the BTE * + * streams to be terminated. Whether or not widget x has access * + * rights to this SHub is determined by an AND of the device * + * enable bit in the appropriate field of this register and bit 0 in * + * the Wx_IAC field. The bits in this field are set by writing a 1 to * + * them. Incoming replies from Crosstalk are not subject to this * + * access control mechanism. * + * * + ************************************************************************/ + +typedef union ii_imem_u { + uint64_t ii_imem_regval; + struct { + uint64_t i_w0_esd : 1; + uint64_t i_rsvd_3 : 3; + uint64_t i_b0_esd : 1; + uint64_t i_rsvd_2 : 3; + uint64_t i_b1_esd : 1; + uint64_t i_rsvd_1 : 3; + uint64_t i_clr_precise : 1; + uint64_t i_rsvd : 51; + } ii_imem_fld_s; +} ii_imem_u_t; + + + +/************************************************************************ + * * + * Description: This register specifies the timeout value to use for * + * monitoring Crosstalk tail flits coming into the Shub in the * + * TAIL_TO field. An internal counter associated with this register * + * is incremented every 128 II internal clocks (7 bits). 
The counter * + * starts counting anytime a header micropacket is received and stops * + * counting (and resets to zero) any time a micropacket with a Tail * + * bit is received. Once the counter reaches the threshold value * + * programmed in this register, it generates an interrupt to the * + * processor that is programmed into the IIDSR. The counter saturates * + * (does not roll over) at its maximum value, so it cannot cause * + * another interrupt until after it is cleared. * + * The register also contains the Read Response Timeout values. The * + * Prescalar is 23 bits, and counts II clocks. An internal counter * + * increments on every II clock and when it reaches the value in the * + * Prescalar field, all IPRTE registers with their valid bits set * + * have their Read Response timers bumped. Whenever any of them match * + * the value in the RRSP_TO field, a Read Response Timeout has * + * occurred, and error handling occurs as described in the Error * + * Handling section of this document. * + * * + ************************************************************************/ + +typedef union ii_ixtt_u { + uint64_t ii_ixtt_regval; + struct { + uint64_t i_tail_to : 26; + uint64_t i_rsvd_1 : 6; + uint64_t i_rrsp_ps : 23; + uint64_t i_rrsp_to : 5; + uint64_t i_rsvd : 4; + } ii_ixtt_fld_s; +} ii_ixtt_u_t; + + +/************************************************************************ + * * + * Writing a 1 to the fields of this register clears the appropriate * + * error bits in other areas of SHub. Note that when the * + * E_PRB_x bits are used to clear error bits in PRB registers, * + * SPUR_RD and SPUR_WR may persist, because they require additional * + * action to clear them. See the IPRBx and IXSS Register * + * specifications. * + * * + ************************************************************************/ + +typedef union ii_ieclr_u { + uint64_t ii_ieclr_regval; + struct { + uint64_t i_e_prb_0 : 1; + uint64_t i_rsvd : 7; + uint64_t i_e_prb_8 : 1; + uint64_t i_e_prb_9 : 1; + uint64_t i_e_prb_a : 1; + uint64_t i_e_prb_b : 1; + uint64_t i_e_prb_c : 1; + uint64_t i_e_prb_d : 1; + uint64_t i_e_prb_e : 1; + uint64_t i_e_prb_f : 1; + uint64_t i_e_crazy : 1; + uint64_t i_e_bte_0 : 1; + uint64_t i_e_bte_1 : 1; + uint64_t i_reserved_1 : 10; + uint64_t i_spur_rd_hdr : 1; + uint64_t i_cam_intr_to : 1; + uint64_t i_cam_overflow : 1; + uint64_t i_cam_read_miss : 1; + uint64_t i_ioq_rep_underflow : 1; + uint64_t i_ioq_req_underflow : 1; + uint64_t i_ioq_rep_overflow : 1; + uint64_t i_ioq_req_overflow : 1; + uint64_t i_iiq_rep_overflow : 1; + uint64_t i_iiq_req_overflow : 1; + uint64_t i_ii_xn_rep_cred_overflow : 1; + uint64_t i_ii_xn_req_cred_overflow : 1; + uint64_t i_ii_xn_invalid_cmd : 1; + uint64_t i_xn_ii_invalid_cmd : 1; + uint64_t i_reserved_2 : 21; + } ii_ieclr_fld_s; +} ii_ieclr_u_t; + + +/************************************************************************ + * * + * This register controls both BTEs. SOFT_RESET is intended for * + * recovery after an error. COUNT controls the total number of CRBs * + * that both BTEs (combined) can use, which affects total BTE * + * bandwidth. 
*
+ *                                                                     *
+ ************************************************************************/
+
+typedef union ii_ibcr_u {
+	uint64_t	ii_ibcr_regval;
+	struct {
+		uint64_t	i_count : 4;
+		uint64_t	i_rsvd_1 : 4;
+		uint64_t	i_soft_reset : 1;
+		uint64_t	i_rsvd : 55;
+	} ii_ibcr_fld_s;
+} ii_ibcr_u_t;
+
+
+/************************************************************************
+ *                                                                      *
+ * This register contains the header of a spurious read response       *
+ * received from Crosstalk. A spurious read response is defined as a   *
+ * read response received by II from a widget for which (1) the SIDN   *
+ * has a value between 1 and 7, inclusive (II never sends requests to  *
+ * these widgets), (2) there is no valid IPRTE register which          *
+ * corresponds to the TNUM, or (3) the widget indicated in SIDN is     *
+ * not the same as the widget recorded in the IPRTE register           *
+ * referenced by the TNUM. If this condition is true, and if the       *
+ * IXSS[VALID] bit is clear, then the header of the spurious read      *
+ * response is captured in IXSM and IXSS, and IXSS[VALID] is set. The  *
+ * errant header is thereby captured, and no further spurious read     *
+ * responses are captured until IXSS[VALID] is cleared by setting the  *
+ * appropriate bit in IECLR. Every time a spurious read response is    *
+ * detected, the SPUR_RD bit of the PRB corresponding to the incoming  *
+ * message's SIDN field is set. This always happens, regardless of     *
+ * whether a header is captured. The programmer should check           *
+ * IXSM[SIDN] to determine which widget sent the spurious response,    *
+ * because there may be more than one SPUR_RD bit set in the PRB       *
+ * registers. The widget indicated by IXSM[SIDN] sent the first        *
+ * spurious read response received since the last time IXSS[VALID]     *
+ * was clear. The SPUR_RD bit of the corresponding PRB will be set.    *
+ * Any SPUR_RD bits in any other PRB registers indicate spurious       *
+ * messages from other widgets which were detected after the header    *
+ * was captured.                                                       *
+ *                                                                     *
+ ************************************************************************/
+
+typedef union ii_ixsm_u {
+	uint64_t	ii_ixsm_regval;
+	struct {
+		uint64_t	i_byte_en : 32;
+		uint64_t	i_reserved : 1;
+		uint64_t	i_tag : 3;
+		uint64_t	i_alt_pactyp : 4;
+		uint64_t	i_bo : 1;
+		uint64_t	i_error : 1;
+		uint64_t	i_vbpm : 1;
+		uint64_t	i_gbr : 1;
+		uint64_t	i_ds : 2;
+		uint64_t	i_ct : 1;
+		uint64_t	i_tnum : 5;
+		uint64_t	i_pactyp : 4;
+		uint64_t	i_sidn : 4;
+		uint64_t	i_didn : 4;
+	} ii_ixsm_fld_s;
+} ii_ixsm_u_t;
+
+
+/************************************************************************
+ *                                                                      *
+ * This register contains the sideband bits of a spurious read         *
+ * response received from Crosstalk.                                   *
+ *                                                                      *
+ ************************************************************************/
+
+typedef union ii_ixss_u {
+	uint64_t	ii_ixss_regval;
+	struct {
+		uint64_t	i_sideband : 8;
+		uint64_t	i_rsvd : 55;
+		uint64_t	i_valid : 1;
+	} ii_ixss_fld_s;
+} ii_ixss_u_t;
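+
+/*
+ * Illustrative sketch (not part of the original register set): draining
+ * one captured spurious read response as described above.  Register
+ * pointers are assumed to have been mapped by the caller; returns the
+ * offending widget's SIDN, or -1 if nothing was captured.
+ */
+static inline int ii_sketch_drain_spurious_rd(volatile uint64_t *ixss_reg,
+					      volatile uint64_t *ixsm_reg,
+					      volatile uint64_t *ieclr_reg)
+{
+	ii_ixss_u_t ixss;
+	ii_ixsm_u_t ixsm;
+	ii_ieclr_u_t ieclr;
+
+	ixss.ii_ixss_regval = *ixss_reg;
+	if (!ixss.ii_ixss_fld_s.i_valid)
+		return -1;		/* no header captured */
+
+	/* SIDN names the first offender; other PRBs may still have
+	 * SPUR_RD set and must be inspected separately. */
+	ixsm.ii_ixsm_regval = *ixsm_reg;
+
+	/* Re-arm capture by clearing IXSS[VALID] through IECLR. */
+	ieclr.ii_ieclr_regval = 0;
+	ieclr.ii_ieclr_fld_s.i_spur_rd_hdr = 1;
+	*ieclr_reg = ieclr.ii_ieclr_regval;
+
+	return (int)ixsm.ii_ixsm_fld_s.i_sidn;
+}
+
+
+/************************************************************************
+ *                                                                      *
+ * This register enables software to access the II LLP's test port.    *
+ * Refer to the LLP 2.5 documentation for an explanation of the test   *
+ * port. Software can write to this register to program the values     *
+ * for the control fields (TestErrCapture, TestClear, TestFlit,        *
+ * TestMask and TestSeed). Similarly, software can read from this      *
+ * register to obtain the values of the test port's status outputs     *
+ * (TestCBerr, TestValid and TestData). 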
* + * * + ************************************************************************/ + +typedef union ii_ilct_u { + uint64_t ii_ilct_regval; + struct { + uint64_t i_test_seed : 20; + uint64_t i_test_mask : 8; + uint64_t i_test_data : 20; + uint64_t i_test_valid : 1; + uint64_t i_test_cberr : 1; + uint64_t i_test_flit : 3; + uint64_t i_test_clear : 1; + uint64_t i_test_err_capture : 1; + uint64_t i_rsvd : 9; + } ii_ilct_fld_s; +} ii_ilct_u_t; + + +/************************************************************************ + * * + * If the II detects an illegal incoming Duplonet packet (request or * + * reply) when VALID==0 in the IIEPH1 register, then it saves the * + * contents of the packet's header flit in the IIEPH1 and IIEPH2 * + * registers, sets the VALID bit in IIEPH1, clears the OVERRUN bit, * + * and assigns a value to the ERR_TYPE field which indicates the * + * specific nature of the error. The II recognizes four different * + * types of errors: short request packets (ERR_TYPE==2), short reply * + * packets (ERR_TYPE==3), long request packets (ERR_TYPE==4) and long * + * reply packets (ERR_TYPE==5). The encodings for these types of * + * errors were chosen to be consistent with the same types of errors * + * indicated by the ERR_TYPE field in the LB_ERROR_HDR1 register (in * + * the LB unit). If the II detects an illegal incoming Duplonet * + * packet when VALID==1 in the IIEPH1 register, then it merely sets * + * the OVERRUN bit to indicate that a subsequent error has happened, * + * and does nothing further. * + * * + ************************************************************************/ + +typedef union ii_iieph1_u { + uint64_t ii_iieph1_regval; + struct { + uint64_t i_command : 7; + uint64_t i_rsvd_5 : 1; + uint64_t i_suppl : 14; + uint64_t i_rsvd_4 : 1; + uint64_t i_source : 14; + uint64_t i_rsvd_3 : 1; + uint64_t i_err_type : 4; + uint64_t i_rsvd_2 : 4; + uint64_t i_overrun : 1; + uint64_t i_rsvd_1 : 3; + uint64_t i_valid : 1; + uint64_t i_rsvd : 13; + } ii_iieph1_fld_s; +} ii_iieph1_u_t; + + +/************************************************************************ + * * + * This register holds the Address field from the header flit of an * + * incoming erroneous Duplonet packet, along with the tail bit which * + * accompanied this header flit. This register is essentially an * + * extension of IIEPH1. Two registers were necessary because the 64 * + * bits available in only a single register were insufficient to * + * capture the entire header flit of an erroneous packet. 
*
+ *                                                                     *
+ ************************************************************************/
+
+typedef union ii_iieph2_u {
+	uint64_t	ii_iieph2_regval;
+	struct {
+		uint64_t	i_rsvd_0 : 3;
+		uint64_t	i_address : 47;
+		uint64_t	i_rsvd_1 : 10;
+		uint64_t	i_tail : 1;
+		uint64_t	i_rsvd : 3;
+	} ii_iieph2_fld_s;
+} ii_iieph2_u_t;
+
+
+/******************************/
+
+
+
+/************************************************************************
+ *                                                                      *
+ * This register's value is a bit vector that guards access from SXBs  *
+ * to local registers within the II as well as to external Crosstalk   *
+ * widgets.                                                            *
+ *                                                                      *
+ ************************************************************************/
+
+typedef union ii_islapr_u {
+	uint64_t	ii_islapr_regval;
+	struct {
+		uint64_t	i_region : 64;
+	} ii_islapr_fld_s;
+} ii_islapr_u_t;
+
+
+/************************************************************************
+ *                                                                      *
+ * A write to this register of the 56-bit value "Pup+Bun" will cause   *
+ * the bit in the ISLAPR register corresponding to the region of the   *
+ * requestor to be set (access allowed). (A write sketch appears       *
+ * after the IWEL definition below.)                                   *
+ *                                                                      *
+ ************************************************************************/
+
+typedef union ii_islapo_u {
+	uint64_t	ii_islapo_regval;
+	struct {
+		uint64_t	i_io_sbx_ovrride : 56;
+		uint64_t	i_rsvd : 8;
+	} ii_islapo_fld_s;
+} ii_islapo_u_t;
+
+/************************************************************************
+ *                                                                      *
+ * Determines how long the wrapper will wait after an interrupt is     *
+ * initially issued from the II before it times out the outstanding    *
+ * interrupt and drops it from the interrupt queue.                    *
+ *                                                                      *
+ ************************************************************************/
+
+typedef union ii_iwi_u {
+	uint64_t	ii_iwi_regval;
+	struct {
+		uint64_t	i_prescale : 24;
+		uint64_t	i_rsvd : 8;
+		uint64_t	i_timeout : 8;
+		uint64_t	i_rsvd1 : 8;
+		uint64_t	i_intrpt_retry_period : 8;
+		uint64_t	i_rsvd2 : 8;
+	} ii_iwi_fld_s;
+} ii_iwi_u_t;
+
+/************************************************************************
+ *                                                                      *
+ * Log errors which have occurred in the II wrapper. The errors are    *
+ * cleared by writing to the IECLR register.                           *
+ *                                                                      *
+ ************************************************************************/
+
+typedef union ii_iwel_u {
+	uint64_t	ii_iwel_regval;
+	struct {
+		uint64_t	i_intr_timed_out : 1;
+		uint64_t	i_rsvd : 7;
+		uint64_t	i_cam_overflow : 1;
+		uint64_t	i_cam_read_miss : 1;
+		uint64_t	i_rsvd1 : 2;
+		uint64_t	i_ioq_rep_underflow : 1;
+		uint64_t	i_ioq_req_underflow : 1;
+		uint64_t	i_ioq_rep_overflow : 1;
+		uint64_t	i_ioq_req_overflow : 1;
+		uint64_t	i_iiq_rep_overflow : 1;
+		uint64_t	i_iiq_req_overflow : 1;
+		uint64_t	i_rsvd2 : 6;
+		uint64_t	i_ii_xn_rep_cred_over_under : 1;
+		uint64_t	i_ii_xn_req_cred_over_under : 1;
+		uint64_t	i_rsvd3 : 6;
+		uint64_t	i_ii_xn_invalid_cmd : 1;
+		uint64_t	i_xn_ii_invalid_cmd : 1;
+		uint64_t	i_rsvd4 : 30;
+	} ii_iwel_fld_s;
+} ii_iwel_u_t;
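+
+/*
+ * Illustrative sketch (not part of the original register set): granting
+ * a requestor access through ISLAPR by writing the override value to
+ * ISLAPO.  The 64-bit constant is the "SGIrules" key that is also
+ * defined below as IIO_PROTECT_OVRRD_KEY; writing the whole register
+ * image sidesteps the 56-bit field-width question.
+ */
+static inline void ii_sketch_islapr_override(volatile uint64_t *islapo_reg)
+{
+	ii_islapo_u_t islapo;
+
+	islapo.ii_islapo_regval = 0x53474972756c6573ull;	/* "SGIrules" */
+	*islapo_reg = islapo.ii_islapo_regval;
+}
+
+/************************************************************************
+ *                                                                      *
+ * Controls the II wrapper. 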
* + * * + ************************************************************************/ + +typedef union ii_iwc_u { + uint64_t ii_iwc_regval; + struct { + uint64_t i_dma_byte_swap : 1; + uint64_t i_rsvd : 3; + uint64_t i_cam_read_lines_reset : 1; + uint64_t i_rsvd1 : 3; + uint64_t i_ii_xn_cred_over_under_log: 1; + uint64_t i_rsvd2 : 19; + uint64_t i_xn_rep_iq_depth : 5; + uint64_t i_rsvd3 : 3; + uint64_t i_xn_req_iq_depth : 5; + uint64_t i_rsvd4 : 3; + uint64_t i_iiq_depth : 6; + uint64_t i_rsvd5 : 12; + uint64_t i_force_rep_cred : 1; + uint64_t i_force_req_cred : 1; + } ii_iwc_fld_s; +} ii_iwc_u_t; + +/************************************************************************ + * * + * Status in the II wrapper. * + * * + ************************************************************************/ + +typedef union ii_iws_u { + uint64_t ii_iws_regval; + struct { + uint64_t i_xn_rep_iq_credits : 5; + uint64_t i_rsvd : 3; + uint64_t i_xn_req_iq_credits : 5; + uint64_t i_rsvd1 : 51; + } ii_iws_fld_s; +} ii_iws_u_t; + +/************************************************************************ + * * + * Masks errors in the IWEL register. * + * * + ************************************************************************/ + +typedef union ii_iweim_u { + uint64_t ii_iweim_regval; + struct { + uint64_t i_intr_timed_out : 1; + uint64_t i_rsvd : 7; + uint64_t i_cam_overflow : 1; + uint64_t i_cam_read_miss : 1; + uint64_t i_rsvd1 : 2; + uint64_t i_ioq_rep_underflow : 1; + uint64_t i_ioq_req_underflow : 1; + uint64_t i_ioq_rep_overflow : 1; + uint64_t i_ioq_req_overflow : 1; + uint64_t i_iiq_rep_overflow : 1; + uint64_t i_iiq_req_overflow : 1; + uint64_t i_rsvd2 : 6; + uint64_t i_ii_xn_rep_cred_overflow : 1; + uint64_t i_ii_xn_req_cred_overflow : 1; + uint64_t i_rsvd3 : 6; + uint64_t i_ii_xn_invalid_cmd : 1; + uint64_t i_xn_ii_invalid_cmd : 1; + uint64_t i_rsvd4 : 30; + } ii_iweim_fld_s; +} ii_iweim_u_t; + + +/************************************************************************ + * * + * A write to this register causes a particular field in the * + * corresponding widget's PRB entry to be adjusted up or down by 1. * + * This counter should be used when recovering from error and reset * + * conditions. Note that software would be capable of causing * + * inadvertent overflow or underflow of these counters. * + * * + ************************************************************************/ + +typedef union ii_ipca_u { + uint64_t ii_ipca_regval; + struct { + uint64_t i_wid : 4; + uint64_t i_adjust : 1; + uint64_t i_rsvd_1 : 3; + uint64_t i_field : 2; + uint64_t i_rsvd : 54; + } ii_ipca_fld_s; +} ii_ipca_u_t; + + +/************************************************************************ + * * + * There are 8 instances of this register. This register contains * + * the information that the II has to remember once it has launched a * + * PIO Read operation. The contents are used to form the correct * + * Router Network packet and direct the Crosstalk reply to the * + * appropriate processor. * + * * + ************************************************************************/ + + +typedef union ii_iprte0a_u { + uint64_t ii_iprte0a_regval; + struct { + uint64_t i_rsvd_1 : 54; + uint64_t i_widget : 4; + uint64_t i_to_cnt : 5; + uint64_t i_vld : 1; + } ii_iprte0a_fld_s; +} ii_iprte0a_u_t; + + +/************************************************************************ + * * + * There are 8 instances of this register. 
This register contains * + * the information that the II has to remember once it has launched a * + * PIO Read operation. The contents are used to form the correct * + * Router Network packet and direct the Crosstalk reply to the * + * appropriate processor. * + * * + ************************************************************************/ + +typedef union ii_iprte1a_u { + uint64_t ii_iprte1a_regval; + struct { + uint64_t i_rsvd_1 : 54; + uint64_t i_widget : 4; + uint64_t i_to_cnt : 5; + uint64_t i_vld : 1; + } ii_iprte1a_fld_s; +} ii_iprte1a_u_t; + + +/************************************************************************ + * * + * There are 8 instances of this register. This register contains * + * the information that the II has to remember once it has launched a * + * PIO Read operation. The contents are used to form the correct * + * Router Network packet and direct the Crosstalk reply to the * + * appropriate processor. * + * * + ************************************************************************/ + +typedef union ii_iprte2a_u { + uint64_t ii_iprte2a_regval; + struct { + uint64_t i_rsvd_1 : 54; + uint64_t i_widget : 4; + uint64_t i_to_cnt : 5; + uint64_t i_vld : 1; + } ii_iprte2a_fld_s; +} ii_iprte2a_u_t; + + +/************************************************************************ + * * + * There are 8 instances of this register. This register contains * + * the information that the II has to remember once it has launched a * + * PIO Read operation. The contents are used to form the correct * + * Router Network packet and direct the Crosstalk reply to the * + * appropriate processor. * + * * + ************************************************************************/ + +typedef union ii_iprte3a_u { + uint64_t ii_iprte3a_regval; + struct { + uint64_t i_rsvd_1 : 54; + uint64_t i_widget : 4; + uint64_t i_to_cnt : 5; + uint64_t i_vld : 1; + } ii_iprte3a_fld_s; +} ii_iprte3a_u_t; + + +/************************************************************************ + * * + * There are 8 instances of this register. This register contains * + * the information that the II has to remember once it has launched a * + * PIO Read operation. The contents are used to form the correct * + * Router Network packet and direct the Crosstalk reply to the * + * appropriate processor. * + * * + ************************************************************************/ + +typedef union ii_iprte4a_u { + uint64_t ii_iprte4a_regval; + struct { + uint64_t i_rsvd_1 : 54; + uint64_t i_widget : 4; + uint64_t i_to_cnt : 5; + uint64_t i_vld : 1; + } ii_iprte4a_fld_s; +} ii_iprte4a_u_t; + + +/************************************************************************ + * * + * There are 8 instances of this register. This register contains * + * the information that the II has to remember once it has launched a * + * PIO Read operation. The contents are used to form the correct * + * Router Network packet and direct the Crosstalk reply to the * + * appropriate processor. * + * * + ************************************************************************/ + +typedef union ii_iprte5a_u { + uint64_t ii_iprte5a_regval; + struct { + uint64_t i_rsvd_1 : 54; + uint64_t i_widget : 4; + uint64_t i_to_cnt : 5; + uint64_t i_vld : 1; + } ii_iprte5a_fld_s; +} ii_iprte5a_u_t; + + +/************************************************************************ + * * + * There are 8 instances of this register. This register contains * + * the information that the II has to remember once it has launched a * + * PIO Read operation. 
The contents are used to form the correct                              *
+ * Router Network packet and direct the Crosstalk reply to the         *
+ * appropriate processor.                                              *
+ *                                                                     *
+ ************************************************************************/
+
+typedef union ii_iprte6a_u {
+	uint64_t	ii_iprte6a_regval;
+	struct {
+		uint64_t	i_rsvd_1 : 54;
+		uint64_t	i_widget : 4;
+		uint64_t	i_to_cnt : 5;
+		uint64_t	i_vld : 1;
+	} ii_iprte6a_fld_s;
+} ii_iprte6a_u_t;
+
+
+/************************************************************************
+ *                                                                      *
+ * There are 8 instances of this register. This register contains      *
+ * the information that the II has to remember once it has launched a  *
+ * PIO Read operation. The contents are used to form the correct       *
+ * Router Network packet and direct the Crosstalk reply to the         *
+ * appropriate processor.                                              *
+ *                                                                      *
+ ************************************************************************/
+
+typedef union ii_iprte7a_u {
+	uint64_t	ii_iprte7a_regval;
+	struct {
+		uint64_t	i_rsvd_1 : 54;
+		uint64_t	i_widget : 4;
+		uint64_t	i_to_cnt : 5;
+		uint64_t	i_vld : 1;
+	} ii_iprte7a_fld_s;
+} ii_iprte7a_u_t;
+
+
+
+/************************************************************************
+ *                                                                      *
+ * There are 8 instances of this register. This register contains      *
+ * the information that the II has to remember once it has launched a  *
+ * PIO Read operation. The contents are used to form the correct       *
+ * Router Network packet and direct the Crosstalk reply to the         *
+ * appropriate processor.                                              *
+ *                                                                      *
+ ************************************************************************/
+
+
+typedef union ii_iprte0b_u {
+	uint64_t	ii_iprte0b_regval;
+	struct {
+		uint64_t	i_rsvd_1 : 3;
+		uint64_t	i_address : 47;
+		uint64_t	i_init : 3;
+		uint64_t	i_source : 11;
+	} ii_iprte0b_fld_s;
+} ii_iprte0b_u_t;
+
+
+/************************************************************************
+ *                                                                      *
+ * There are 8 instances of this register. This register contains      *
+ * the information that the II has to remember once it has launched a  *
+ * PIO Read operation. The contents are used to form the correct       *
+ * Router Network packet and direct the Crosstalk reply to the         *
+ * appropriate processor.                                              *
+ *                                                                      *
+ ************************************************************************/
+
+typedef union ii_iprte1b_u {
+	uint64_t	ii_iprte1b_regval;
+	struct {
+		uint64_t	i_rsvd_1 : 3;
+		uint64_t	i_address : 47;
+		uint64_t	i_init : 3;
+		uint64_t	i_source : 11;
+	} ii_iprte1b_fld_s;
+} ii_iprte1b_u_t;
+
+
+/************************************************************************
+ *                                                                      *
+ * There are 8 instances of this register. This register contains      *
+ * the information that the II has to remember once it has launched a  *
+ * PIO Read operation. The contents are used to form the correct       *
+ * Router Network packet and direct the Crosstalk reply to the         *
+ * appropriate processor.                                              *
+ *                                                                      *
+ ************************************************************************/
+
+typedef union ii_iprte2b_u {
+	uint64_t	ii_iprte2b_regval;
+	struct {
+		uint64_t	i_rsvd_1 : 3;
+		uint64_t	i_address : 47;
+		uint64_t	i_init : 3;
+		uint64_t	i_source : 11;
+	} ii_iprte2b_fld_s;
+} ii_iprte2b_u_t;
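+
+/*
+ * Illustrative sketch (not part of the original register set): scanning
+ * the eight "A" entries of the PIO Read Table for an in-flight read.
+ * The entries are assumed to sit at consecutive 64-bit offsets from
+ * IPRTE0_A, which is the layout the IIO_PRTE_A() macro near the end
+ * of this file encodes.
+ */
+static inline int ii_sketch_find_valid_prte(volatile uint64_t *prte0_a)
+{
+	ii_iprte0a_u_t prte;
+	int te;
+
+	for (te = 0; te < 8; te++) {
+		prte.ii_iprte0a_regval = prte0_a[te];
+		if (prte.ii_iprte0a_fld_s.i_vld)
+			return te;	/* first valid table entry */
+	}
+	return -1;			/* no PIO read outstanding */
+}
+
+
+/************************************************************************
+ *                                                                      *
+ * There are 8 instances of this register. This register contains      *
+ * the information that the II has to remember once it has launched a  *
+ * PIO Read operation. 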
The contents are used to form the correct * + * Router Network packet and direct the Crosstalk reply to the * + * appropriate processor. * + * * + ************************************************************************/ + +typedef union ii_iprte3b_u { + uint64_t ii_iprte3b_regval; + struct { + uint64_t i_rsvd_1 : 3; + uint64_t i_address : 47; + uint64_t i_init : 3; + uint64_t i_source : 11; + } ii_iprte3b_fld_s; +} ii_iprte3b_u_t; + + +/************************************************************************ + * * + * There are 8 instances of this register. This register contains * + * the information that the II has to remember once it has launched a * + * PIO Read operation. The contents are used to form the correct * + * Router Network packet and direct the Crosstalk reply to the * + * appropriate processor. * + * * + ************************************************************************/ + +typedef union ii_iprte4b_u { + uint64_t ii_iprte4b_regval; + struct { + uint64_t i_rsvd_1 : 3; + uint64_t i_address : 47; + uint64_t i_init : 3; + uint64_t i_source : 11; + } ii_iprte4b_fld_s; +} ii_iprte4b_u_t; + + +/************************************************************************ + * * + * There are 8 instances of this register. This register contains * + * the information that the II has to remember once it has launched a * + * PIO Read operation. The contents are used to form the correct * + * Router Network packet and direct the Crosstalk reply to the * + * appropriate processor. * + * * + ************************************************************************/ + +typedef union ii_iprte5b_u { + uint64_t ii_iprte5b_regval; + struct { + uint64_t i_rsvd_1 : 3; + uint64_t i_address : 47; + uint64_t i_init : 3; + uint64_t i_source : 11; + } ii_iprte5b_fld_s; +} ii_iprte5b_u_t; + + +/************************************************************************ + * * + * There are 8 instances of this register. This register contains * + * the information that the II has to remember once it has launched a * + * PIO Read operation. The contents are used to form the correct * + * Router Network packet and direct the Crosstalk reply to the * + * appropriate processor. * + * * + ************************************************************************/ + +typedef union ii_iprte6b_u { + uint64_t ii_iprte6b_regval; + struct { + uint64_t i_rsvd_1 : 3; + uint64_t i_address : 47; + uint64_t i_init : 3; + uint64_t i_source : 11; + + } ii_iprte6b_fld_s; +} ii_iprte6b_u_t; + + +/************************************************************************ + * * + * There are 8 instances of this register. This register contains * + * the information that the II has to remember once it has launched a * + * PIO Read operation. The contents are used to form the correct * + * Router Network packet and direct the Crosstalk reply to the * + * appropriate processor. * + * * + ************************************************************************/ + +typedef union ii_iprte7b_u { + uint64_t ii_iprte7b_regval; + struct { + uint64_t i_rsvd_1 : 3; + uint64_t i_address : 47; + uint64_t i_init : 3; + uint64_t i_source : 11; + } ii_iprte7b_fld_s; +} ii_iprte7b_u_t; + + +/************************************************************************ + * * + * Description: SHub II contains a feature which did not exist in * + * the Hub which automatically cleans up after a Read Response * + * timeout, including deallocation of the IPRTE and recovery of IBuf * + * space. 
The inclusion of this register in SHub is for backward                 *
+ * compatibility.                                                      *
+ * A write to this register causes an entry from the table of          *
+ * outstanding PIO Read Requests to be freed and returned to the       *
+ * stack of free entries. This register is used in handling the        *
+ * timeout errors that result in a PIO Reply never returning from      *
+ * Crosstalk.                                                          *
+ * Note that this register does not affect the contents of the IPRTE   *
+ * registers. The Valid bits in those registers have to be             *
+ * specifically turned off by software.                                *
+ *                                                                     *
+ ************************************************************************/
+
+typedef union ii_ipdr_u {
+	uint64_t	ii_ipdr_regval;
+	struct {
+		uint64_t	i_te : 3;
+		uint64_t	i_rsvd_1 : 1;
+		uint64_t	i_pnd : 1;
+		uint64_t	i_init_rpcnt : 1;
+		uint64_t	i_rsvd : 58;
+	} ii_ipdr_fld_s;
+} ii_ipdr_u_t;
+
+
+/************************************************************************
+ *                                                                      *
+ * A write to this register causes a CRB entry to be returned to the   *
+ * queue of free CRBs. The entry should have previously been cleared   *
+ * (mark bit) via backdoor access to the pertinent CRB entry. This     *
+ * register is used in the last step of handling the errors that are   *
+ * captured and marked in CRB entries. Briefly: 1) first error for     *
+ * DMA write from a particular device, and first error for a           *
+ * particular BTE stream, lead to a marked CRB entry, and processor    *
+ * interrupt, 2) software reads the error information captured in the  *
+ * CRB entry, and presumably takes some corrective action, 3)          *
+ * software clears the mark bit, and finally 4) software writes to     *
+ * the ICDR register to return the CRB entry to the list of free CRB   *
+ * entries. (A sketch of step 4 appears after the IFDR definition      *
+ * below.)                                                             *
+ *                                                                     *
+ ************************************************************************/
+
+typedef union ii_icdr_u {
+	uint64_t	ii_icdr_regval;
+	struct {
+		uint64_t	i_crb_num : 4;
+		uint64_t	i_pnd : 1;
+		uint64_t	i_rsvd : 59;
+	} ii_icdr_fld_s;
+} ii_icdr_u_t;
+
+
+/************************************************************************
+ *                                                                      *
+ * This register provides debug access to two FIFOs inside of II.      *
+ * Both IOQ_MAX* fields of this register contain the instantaneous     *
+ * depth (in units of the number of available entries) of the          *
+ * associated IOQ FIFO. A read of this register will return the        *
+ * number of free entries on each FIFO at the time of the read. So     *
+ * when a FIFO is idle, the associated field contains the maximum      *
+ * depth of the FIFO. This register is writable for debug reasons      *
+ * and is intended to be written with the maximum desired FIFO depth   *
+ * while the FIFO is idle. Software must assure that II is idle when   *
+ * this register is written. If there are any active entries in any    *
+ * of these FIFOs when this register is written, the results are       *
+ * undefined.                                                          *
+ *                                                                     *
+ ************************************************************************/
+
+typedef union ii_ifdr_u {
+	uint64_t	ii_ifdr_regval;
+	struct {
+		uint64_t	i_ioq_max_rq : 7;
+		uint64_t	i_set_ioq_rq : 1;
+		uint64_t	i_ioq_max_rp : 7;
+		uint64_t	i_set_ioq_rp : 1;
+		uint64_t	i_rsvd : 48;
+	} ii_ifdr_fld_s;
+} ii_ifdr_u_t;
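+
+/*
+ * Illustrative sketch (not part of the original register set): step 4
+ * of the CRB error recovery sequence described above -- returning a
+ * previously cleared CRB entry to the free queue through ICDR and
+ * spinning until the hardware acknowledges.  The write-then-poll PND
+ * handshake is an assumption of this example.
+ */
+static inline void ii_sketch_free_crb(volatile uint64_t *icdr_reg, int crb)
+{
+	ii_icdr_u_t icdr;
+
+	icdr.ii_icdr_regval = 0;
+	icdr.ii_icdr_fld_s.i_crb_num = crb;
+	icdr.ii_icdr_fld_s.i_pnd = 1;	/* start the deallocation */
+	*icdr_reg = icdr.ii_icdr_regval;
+
+	/* PND reads back as 1 until the entry has been freed. */
+	do {
+		icdr.ii_icdr_regval = *icdr_reg;
+	} while (icdr.ii_icdr_fld_s.i_pnd);
+}
+
+
+/************************************************************************
+ *                                                                      *
+ * This register allows the II to become sluggish in removing          *
+ * messages from its inbound queue (IIQ). This will cause messages to  *
+ * back up in either virtual channel. Disabling the "molasses" mode    *
+ * subsequently allows the II to be tested under stress.               *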
+ * In the sluggish ("Molasses") mode, the localized effects of         *
+ * congestion can be observed.                                         *
+ *                                                                     *
+ ************************************************************************/
+
+typedef union ii_iiap_u {
+	uint64_t	ii_iiap_regval;
+	struct {
+		uint64_t	i_rq_mls : 6;
+		uint64_t	i_rsvd_1 : 2;
+		uint64_t	i_rp_mls : 6;
+		uint64_t	i_rsvd : 50;
+	} ii_iiap_fld_s;
+} ii_iiap_u_t;
+
+
+/************************************************************************
+ *                                                                      *
+ * This register allows several parameters of CRB operation to be      *
+ * set. Note that writing to this register can have catastrophic side  *
+ * effects, if the CRB is not quiescent, i.e. if the CRB is            *
+ * processing protocol messages when the write occurs.                 *
+ *                                                                      *
+ ************************************************************************/
+
+typedef union ii_icmr_u {
+	uint64_t	ii_icmr_regval;
+	struct {
+		uint64_t	i_sp_msg : 1;
+		uint64_t	i_rd_hdr : 1;
+		uint64_t	i_rsvd_4 : 2;
+		uint64_t	i_c_cnt : 4;
+		uint64_t	i_rsvd_3 : 4;
+		uint64_t	i_clr_rqpd : 1;
+		uint64_t	i_clr_rppd : 1;
+		uint64_t	i_rsvd_2 : 2;
+		uint64_t	i_fc_cnt : 4;
+		uint64_t	i_crb_vld : 15;
+		uint64_t	i_crb_mark : 15;
+		uint64_t	i_rsvd_1 : 2;
+		uint64_t	i_precise : 1;
+		uint64_t	i_rsvd : 11;
+	} ii_icmr_fld_s;
+} ii_icmr_u_t;
+
+
+/************************************************************************
+ *                                                                      *
+ * This register allows control of the table portion of the CRB        *
+ * logic via software. Control operations from this register have      *
+ * priority over all incoming Crosstalk or BTE requests. (A usage      *
+ * sketch appears after the ICTP definition below.)                    *
+ *                                                                      *
+ ************************************************************************/
+
+typedef union ii_iccr_u {
+	uint64_t	ii_iccr_regval;
+	struct {
+		uint64_t	i_crb_num : 4;
+		uint64_t	i_rsvd_1 : 4;
+		uint64_t	i_cmd : 8;
+		uint64_t	i_pending : 1;
+		uint64_t	i_rsvd : 47;
+	} ii_iccr_fld_s;
+} ii_iccr_u_t;
+
+
+/************************************************************************
+ *                                                                      *
+ * This register allows the maximum timeout value to be programmed.    *
+ *                                                                      *
+ ************************************************************************/
+
+typedef union ii_icto_u {
+	uint64_t	ii_icto_regval;
+	struct {
+		uint64_t	i_timeout : 8;
+		uint64_t	i_rsvd : 56;
+	} ii_icto_fld_s;
+} ii_icto_u_t;
+
+
+/************************************************************************
+ *                                                                      *
+ * This register allows the timeout prescalar to be programmed. An     *
+ * internal counter is associated with this register. When the         *
+ * internal counter reaches the value of the PRESCALE field, the       *
+ * timer registers in all valid CRBs are incremented (CRBx_D[TIMEOUT]  *
+ * field). The internal counter resets to zero, and then continues     *
+ * counting.                                                           *
+ *                                                                      *
+ ************************************************************************/
+
+typedef union ii_ictp_u {
+	uint64_t	ii_ictp_regval;
+	struct {
+		uint64_t	i_prescale : 24;
+		uint64_t	i_rsvd : 40;
+	} ii_ictp_fld_s;
+} ii_ictp_u_t;
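+
+/*
+ * Illustrative sketch (not part of the original register set): issuing
+ * one CRB table command through ICCR and waiting for it to retire.
+ * The pre-shifted command encodings (IIO_ICCR_CMD_*) are defined near
+ * the end of this file; per the union above, CRB_NUM occupies bits
+ * 3:0 and PENDING is bit 16 of the register image.
+ */
+static inline void ii_sketch_iccr_cmd(volatile uint64_t *iccr_reg,
+				      uint64_t crb, uint64_t cmd)
+{
+	ii_iccr_u_t iccr;
+
+	iccr.ii_iccr_regval = crb | cmd | (1ULL << 16);	/* set PENDING */
+	*iccr_reg = iccr.ii_iccr_regval;
+
+	/* PENDING clears once the control operation has completed. */
+	do {
+		iccr.ii_iccr_regval = *iccr_reg;
+	} while (iccr.ii_iccr_fld_s.i_pending);
+}
+
+
+/************************************************************************
+ *                                                                      *
+ * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are     *
+ * used for Crosstalk operations (both cacheline and partial           *
+ * operations) or BTE/IO. Because the CRB entries are very wide, five  *
+ * registers (_A to _E) are required to read and write each entry.     *
+ * The CRB Entry registers can be conceptualized as rows and columns   *
+ * (illustrated in the table above). Each row contains the five        *
+ * registers required for a single CRB Entry.                          *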
+ * The first doubleword (column) for each entry is labeled A, and the  *
+ * second doubleword (higher address) is labeled B, the third          *
+ * doubleword is labeled C, the fourth doubleword is labeled D and     *
+ * the fifth doubleword is labeled E. All CRB entries have their       *
+ * addresses on a quarter cacheline aligned boundary.                  *
+ * Upon reset, only the following fields are initialized: valid        *
+ * (VLD), priority count, timeout, timeout valid, and context valid.   *
+ * All other bits should be cleared by software before use (after      *
+ * recovering any potential error state from before the reset).        *
+ * The following five tables summarize the format for the five         *
+ * registers that are used for each ICRB# Entry.                       *
+ *                                                                     *
+ ************************************************************************/
+
+typedef union ii_icrb0_a_u {
+	uint64_t	ii_icrb0_a_regval;
+	struct {
+		uint64_t	ia_iow : 1;
+		uint64_t	ia_vld : 1;
+		uint64_t	ia_addr : 47;
+		uint64_t	ia_tnum : 5;
+		uint64_t	ia_sidn : 4;
+		uint64_t	ia_rsvd : 6;
+	} ii_icrb0_a_fld_s;
+} ii_icrb0_a_u_t;
+
+
+/************************************************************************
+ *                                                                      *
+ * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are     *
+ * used for Crosstalk operations (both cacheline and partial           *
+ * operations) or BTE/IO. Because the CRB entries are very wide, five  *
+ * registers (_A to _E) are required to read and write each entry.     *
+ *                                                                      *
+ ************************************************************************/
+
+typedef union ii_icrb0_b_u {
+	uint64_t	ii_icrb0_b_regval;
+	struct {
+		uint64_t	ib_xt_err : 1;
+		uint64_t	ib_mark : 1;
+		uint64_t	ib_ln_uce : 1;
+		uint64_t	ib_errcode : 3;
+		uint64_t	ib_error : 1;
+		uint64_t	ib_stall__bte_1 : 1;
+		uint64_t	ib_stall__bte_0 : 1;
+		uint64_t	ib_stall__intr : 1;
+		uint64_t	ib_stall_ib : 1;
+		uint64_t	ib_intvn : 1;
+		uint64_t	ib_wb : 1;
+		uint64_t	ib_hold : 1;
+		uint64_t	ib_ack : 1;
+		uint64_t	ib_resp : 1;
+		uint64_t	ib_ack_cnt : 11;
+		uint64_t	ib_rsvd : 7;
+		uint64_t	ib_exc : 5;
+		uint64_t	ib_init : 3;
+		uint64_t	ib_imsg : 8;
+		uint64_t	ib_imsgtype : 2;
+		uint64_t	ib_use_old : 1;
+		uint64_t	ib_rsvd_1 : 11;
+	} ii_icrb0_b_fld_s;
+} ii_icrb0_b_u_t;
+
+
+/************************************************************************
+ *                                                                      *
+ * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are     *
+ * used for Crosstalk operations (both cacheline and partial           *
+ * operations) or BTE/IO. Because the CRB entries are very wide, five  *
+ * registers (_A to _E) are required to read and write each entry.     *
+ *                                                                      *
+ ************************************************************************/
+
+typedef union ii_icrb0_c_u {
+	uint64_t	ii_icrb0_c_regval;
+	struct {
+		uint64_t	ic_source : 15;
+		uint64_t	ic_size : 2;
+		uint64_t	ic_ct : 1;
+		uint64_t	ic_bte_num : 1;
+		uint64_t	ic_gbr : 1;
+		uint64_t	ic_resprqd : 1;
+		uint64_t	ic_bo : 1;
+		uint64_t	ic_suppl : 15;
+		uint64_t	ic_rsvd : 27;
+	} ii_icrb0_c_fld_s;
+} ii_icrb0_c_u_t;
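+
+/*
+ * Illustrative sketch (not part of the original register set):
+ * snapshotting one CRB entry.  The caller passes a pointer to the
+ * entry's _A doubleword; _B.._E are assumed to follow at consecutive
+ * 64-bit offsets, as the IIO_ICRB_* macros near the end of this file
+ * encode.  The _D and _E words are kept raw here because their unions
+ * are defined further below.
+ */
+struct ii_sketch_crb_snapshot {
+	ii_icrb0_a_u_t	a;
+	ii_icrb0_b_u_t	b;
+	ii_icrb0_c_u_t	c;
+	uint64_t	d;
+	uint64_t	e;
+};
+
+static inline void ii_sketch_read_crb(volatile uint64_t *crb_a,
+				      struct ii_sketch_crb_snapshot *snap)
+{
+	snap->a.ii_icrb0_a_regval = crb_a[0];
+	snap->b.ii_icrb0_b_regval = crb_a[1];
+	snap->c.ii_icrb0_c_regval = crb_a[2];
+	snap->d = crb_a[3];
+	snap->e = crb_a[4];
+}
+
+
+/************************************************************************
+ *                                                                      *
+ * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are     *
+ * used for Crosstalk operations (both cacheline and partial           *
+ * operations) or BTE/IO. Because the CRB entries are very wide, five  *
+ * registers (_A to _E) are required to read and write each entry. 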
* + * * + ************************************************************************/ + +typedef union ii_icrb0_d_u { + uint64_t ii_icrb0_d_regval; + struct { + uint64_t id_pa_be : 43; + uint64_t id_bte_op : 1; + uint64_t id_pr_psc : 4; + uint64_t id_pr_cnt : 4; + uint64_t id_sleep : 1; + uint64_t id_rsvd : 11; + } ii_icrb0_d_fld_s; +} ii_icrb0_d_u_t; + + +/************************************************************************ + * * + * Description: There are 15 CRB Entries (ICRB0 to ICRBE) that are * + * used for Crosstalk operations (both cacheline and partial * + * operations) or BTE/IO. Because the CRB entries are very wide, five * + * registers (_A to _E) are required to read and write each entry. * + * * + ************************************************************************/ + +typedef union ii_icrb0_e_u { + uint64_t ii_icrb0_e_regval; + struct { + uint64_t ie_timeout : 8; + uint64_t ie_context : 15; + uint64_t ie_rsvd : 1; + uint64_t ie_tvld : 1; + uint64_t ie_cvld : 1; + uint64_t ie_rsvd_0 : 38; + } ii_icrb0_e_fld_s; +} ii_icrb0_e_u_t; + + +/************************************************************************ + * * + * This register contains the lower 64 bits of the header of the * + * spurious message captured by II. Valid when the SP_MSG bit in ICMR * + * register is set. * + * * + ************************************************************************/ + +typedef union ii_icsml_u { + uint64_t ii_icsml_regval; + struct { + uint64_t i_tt_addr : 47; + uint64_t i_newsuppl_ex : 14; + uint64_t i_reserved : 2; + uint64_t i_overflow : 1; + } ii_icsml_fld_s; +} ii_icsml_u_t; + + +/************************************************************************ + * * + * This register contains the middle 64 bits of the header of the * + * spurious message captured by II. Valid when the SP_MSG bit in ICMR * + * register is set. * + * * + ************************************************************************/ + +typedef union ii_icsmm_u { + uint64_t ii_icsmm_regval; + struct { + uint64_t i_tt_ack_cnt : 11; + uint64_t i_reserved : 53; + } ii_icsmm_fld_s; +} ii_icsmm_u_t; + + +/************************************************************************ + * * + * This register contains the microscopic state, all the inputs to * + * the protocol table, captured with the spurious message. Valid when * + * the SP_MSG bit in the ICMR register is set. 
* + * * + ************************************************************************/ + +typedef union ii_icsmh_u { + uint64_t ii_icsmh_regval; + struct { + uint64_t i_tt_vld : 1; + uint64_t i_xerr : 1; + uint64_t i_ft_cwact_o : 1; + uint64_t i_ft_wact_o : 1; + uint64_t i_ft_active_o : 1; + uint64_t i_sync : 1; + uint64_t i_mnusg : 1; + uint64_t i_mnusz : 1; + uint64_t i_plusz : 1; + uint64_t i_plusg : 1; + uint64_t i_tt_exc : 5; + uint64_t i_tt_wb : 1; + uint64_t i_tt_hold : 1; + uint64_t i_tt_ack : 1; + uint64_t i_tt_resp : 1; + uint64_t i_tt_intvn : 1; + uint64_t i_g_stall_bte1 : 1; + uint64_t i_g_stall_bte0 : 1; + uint64_t i_g_stall_il : 1; + uint64_t i_g_stall_ib : 1; + uint64_t i_tt_imsg : 8; + uint64_t i_tt_imsgtype : 2; + uint64_t i_tt_use_old : 1; + uint64_t i_tt_respreqd : 1; + uint64_t i_tt_bte_num : 1; + uint64_t i_cbn : 1; + uint64_t i_match : 1; + uint64_t i_rpcnt_lt_34 : 1; + uint64_t i_rpcnt_ge_34 : 1; + uint64_t i_rpcnt_lt_18 : 1; + uint64_t i_rpcnt_ge_18 : 1; + uint64_t i_rpcnt_lt_2 : 1; + uint64_t i_rpcnt_ge_2 : 1; + uint64_t i_rqcnt_lt_18 : 1; + uint64_t i_rqcnt_ge_18 : 1; + uint64_t i_rqcnt_lt_2 : 1; + uint64_t i_rqcnt_ge_2 : 1; + uint64_t i_tt_device : 7; + uint64_t i_tt_init : 3; + uint64_t i_reserved : 5; + } ii_icsmh_fld_s; +} ii_icsmh_u_t; + + +/************************************************************************ + * * + * The Shub DEBUG unit provides a 3-bit selection signal to the * + * II core and a 3-bit selection signal to the fsbclk domain in the II * + * wrapper. * + * * + ************************************************************************/ + +typedef union ii_idbss_u { + uint64_t ii_idbss_regval; + struct { + uint64_t i_iioclk_core_submenu : 3; + uint64_t i_rsvd : 5; + uint64_t i_fsbclk_wrapper_submenu : 3; + uint64_t i_rsvd_1 : 5; + uint64_t i_iioclk_menu : 5; + uint64_t i_rsvd_2 : 43; + } ii_idbss_fld_s; +} ii_idbss_u_t; + + +/************************************************************************ + * * + * Description: This register is used to set up the length for a * + * transfer and then to monitor the progress of that transfer. This * + * register needs to be initialized before a transfer is started. A * + * legitimate write to this register will set the Busy bit, clear the * + * Error bit, and initialize the length to the value desired. * + * While the transfer is in progress, hardware will decrement the * + * length field with each successful block that is copied. Once the * + * transfer completes, hardware will clear the Busy bit. The length * + * field will also contain the number of cache lines left to be * + * transferred. * + * * + ************************************************************************/ + +typedef union ii_ibls0_u { + uint64_t ii_ibls0_regval; + struct { + uint64_t i_length : 16; + uint64_t i_error : 1; + uint64_t i_rsvd_1 : 3; + uint64_t i_busy : 1; + uint64_t i_rsvd : 43; + } ii_ibls0_fld_s; +} ii_ibls0_u_t; + + +/************************************************************************ + * * + * This register should be loaded before a transfer is started. The * + * address to be loaded in bits 39:0 is the 40-bit TRex+ physical * + * address as described in Section 1.3, Figure2 and Figure3. Since * + * the bottom 7 bits of the address are always taken to be zero, BTE * + * transfers are always cacheline-aligned. 
* + * * + ************************************************************************/ + +typedef union ii_ibsa0_u { + uint64_t ii_ibsa0_regval; + struct { + uint64_t i_rsvd_1 : 7; + uint64_t i_addr : 42; + uint64_t i_rsvd : 15; + } ii_ibsa0_fld_s; +} ii_ibsa0_u_t; + + +/************************************************************************ + * * + * This register should be loaded before a transfer is started. The * + * address to be loaded in bits 39:0 is the 40-bit TRex+ physical * + * address as described in Section 1.3, Figure2 and Figure3. Since * + * the bottom 7 bits of the address are always taken to be zero, BTE * + * transfers are always cacheline-aligned. * + * * + ************************************************************************/ + +typedef union ii_ibda0_u { + uint64_t ii_ibda0_regval; + struct { + uint64_t i_rsvd_1 : 7; + uint64_t i_addr : 42; + uint64_t i_rsvd : 15; + } ii_ibda0_fld_s; +} ii_ibda0_u_t; + + +/************************************************************************ + * * + * Writing to this register sets up the attributes of the transfer * + * and initiates the transfer operation. Reading this register has * + * the side effect of terminating any transfer in progress. Note: * + * stopping a transfer midstream could have an adverse impact on the * + * other BTE. If a BTE stream has to be stopped (due to error * + * handling for example), both BTE streams should be stopped and * + * their transfers discarded. * + * * + ************************************************************************/ + +typedef union ii_ibct0_u { + uint64_t ii_ibct0_regval; + struct { + uint64_t i_zerofill : 1; + uint64_t i_rsvd_2 : 3; + uint64_t i_notify : 1; + uint64_t i_rsvd_1 : 3; + uint64_t i_poison : 1; + uint64_t i_rsvd : 55; + } ii_ibct0_fld_s; +} ii_ibct0_u_t; + + +/************************************************************************ + * * + * This register contains the address to which the WINV is sent. * + * This address has to be cache line aligned. * + * * + ************************************************************************/ + +typedef union ii_ibna0_u { + uint64_t ii_ibna0_regval; + struct { + uint64_t i_rsvd_1 : 7; + uint64_t i_addr : 42; + uint64_t i_rsvd : 15; + } ii_ibna0_fld_s; +} ii_ibna0_u_t; + + +/************************************************************************ + * * + * This register contains the programmable level as well as the node * + * ID and PI unit of the processor to which the interrupt will be * + * sent. * + * * + ************************************************************************/ + +typedef union ii_ibia0_u { + uint64_t ii_ibia0_regval; + struct { + uint64_t i_rsvd_2 : 1; + uint64_t i_node_id : 11; + uint64_t i_rsvd_1 : 4; + uint64_t i_level : 7; + uint64_t i_rsvd : 41; + } ii_ibia0_fld_s; +} ii_ibia0_u_t; + + +/************************************************************************ + * * + * Description: This register is used to set up the length for a * + * transfer and then to monitor the progress of that transfer. This * + * register needs to be initialized before a transfer is started. A * + * legitimate write to this register will set the Busy bit, clear the * + * Error bit, and initialize the length to the value desired. * + * While the transfer is in progress, hardware will decrement the * + * length field with each successful block that is copied. Once the * + * transfer completes, hardware will clear the Busy bit. The length * + * field will also contain the number of cache lines left to be * + * transferred. 
* + * * + ************************************************************************/ + +typedef union ii_ibls1_u { + uint64_t ii_ibls1_regval; + struct { + uint64_t i_length : 16; + uint64_t i_error : 1; + uint64_t i_rsvd_1 : 3; + uint64_t i_busy : 1; + uint64_t i_rsvd : 43; + } ii_ibls1_fld_s; +} ii_ibls1_u_t; + + +/************************************************************************ + * * + * This register should be loaded before a transfer is started. The * + * address to be loaded in bits 39:0 is the 40-bit TRex+ physical * + * address as described in Section 1.3, Figure2 and Figure3. Since * + * the bottom 7 bits of the address are always taken to be zero, BTE * + * transfers are always cacheline-aligned. * + * * + ************************************************************************/ + +typedef union ii_ibsa1_u { + uint64_t ii_ibsa1_regval; + struct { + uint64_t i_rsvd_1 : 7; + uint64_t i_addr : 33; + uint64_t i_rsvd : 24; + } ii_ibsa1_fld_s; +} ii_ibsa1_u_t; + + +/************************************************************************ + * * + * This register should be loaded before a transfer is started. The * + * address to be loaded in bits 39:0 is the 40-bit TRex+ physical * + * address as described in Section 1.3, Figure2 and Figure3. Since * + * the bottom 7 bits of the address are always taken to be zero, BTE * + * transfers are always cacheline-aligned. * + * * + ************************************************************************/ + +typedef union ii_ibda1_u { + uint64_t ii_ibda1_regval; + struct { + uint64_t i_rsvd_1 : 7; + uint64_t i_addr : 33; + uint64_t i_rsvd : 24; + } ii_ibda1_fld_s; +} ii_ibda1_u_t; + + +/************************************************************************ + * * + * Writing to this register sets up the attributes of the transfer * + * and initiates the transfer operation. Reading this register has * + * the side effect of terminating any transfer in progress. Note: * + * stopping a transfer midstream could have an adverse impact on the * + * other BTE. If a BTE stream has to be stopped (due to error * + * handling for example), both BTE streams should be stopped and * + * their transfers discarded. * + * * + ************************************************************************/ + +typedef union ii_ibct1_u { + uint64_t ii_ibct1_regval; + struct { + uint64_t i_zerofill : 1; + uint64_t i_rsvd_2 : 3; + uint64_t i_notify : 1; + uint64_t i_rsvd_1 : 3; + uint64_t i_poison : 1; + uint64_t i_rsvd : 55; + } ii_ibct1_fld_s; +} ii_ibct1_u_t; + + +/************************************************************************ + * * + * This register contains the address to which the WINV is sent. * + * This address has to be cache line aligned. * + * * + ************************************************************************/ + +typedef union ii_ibna1_u { + uint64_t ii_ibna1_regval; + struct { + uint64_t i_rsvd_1 : 7; + uint64_t i_addr : 33; + uint64_t i_rsvd : 24; + } ii_ibna1_fld_s; +} ii_ibna1_u_t; + + +/************************************************************************ + * * + * This register contains the programmable level as well as the node * + * ID and PI unit of the processor to which the interrupt will be * + * sent. 
*
+ *                                                                     *
+ ************************************************************************/
+
+typedef union ii_ibia1_u {
+	uint64_t	ii_ibia1_regval;
+	struct {
+		uint64_t	i_pi_id : 1;
+		uint64_t	i_node_id : 8;
+		uint64_t	i_rsvd_1 : 7;
+		uint64_t	i_level : 7;
+		uint64_t	i_rsvd : 41;
+	} ii_ibia1_fld_s;
+} ii_ibia1_u_t;
+
+
+/************************************************************************
+ *                                                                      *
+ * This register defines the resources that feed information into      *
+ * the two performance counters located in the IO Performance          *
+ * Profiling Register. There are 17 different quantities that can be   *
+ * measured. Given these 17 different options, the two performance     *
+ * counters have 15 of them in common; menu selections 0 through 0xE   *
+ * are identical for each performance counter. As for the other two    *
+ * options, one is available from one performance counter and the      *
+ * other is available from the other performance counter. Hence, the   *
+ * II supports all 17*16=272 possible combinations of quantities to    *
+ * measure. (A selection sketch appears after the IPPR definition      *
+ * below.)                                                             *
+ *                                                                      *
+ ************************************************************************/
+
+typedef union ii_ipcr_u {
+	uint64_t	ii_ipcr_regval;
+	struct {
+		uint64_t	i_ippr0_c : 4;
+		uint64_t	i_ippr1_c : 4;
+		uint64_t	i_icct : 8;
+		uint64_t	i_rsvd : 48;
+	} ii_ipcr_fld_s;
+} ii_ipcr_u_t;
+
+
+/************************************************************************
+ *                                                                      *
+ * The IO Performance Profiling Register, holding the two 32-bit       *
+ * performance counts selected by the IPCR register above.             *
+ *                                                                      *
+ ************************************************************************/
+
+typedef union ii_ippr_u {
+	uint64_t	ii_ippr_regval;
+	struct {
+		uint64_t	i_ippr0 : 32;
+		uint64_t	i_ippr1 : 32;
+	} ii_ippr_fld_s;
+} ii_ippr_u_t;
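+
+/*
+ * Illustrative sketch (not part of the original register set):
+ * selecting a menu quantity for each performance counter through IPCR
+ * and reading the two 32-bit counts back from IPPR.
+ */
+static inline void ii_sketch_perf_select(volatile uint64_t *ipcr_reg,
+					 int menu0, int menu1)
+{
+	ii_ipcr_u_t ipcr;
+
+	ipcr.ii_ipcr_regval = 0;
+	ipcr.ii_ipcr_fld_s.i_ippr0_c = menu0;	/* counter 0 source */
+	ipcr.ii_ipcr_fld_s.i_ippr1_c = menu1;	/* counter 1 source */
+	*ipcr_reg = ipcr.ii_ipcr_regval;
+}
+
+static inline void ii_sketch_perf_read(volatile uint64_t *ippr_reg,
+				       uint32_t *count0, uint32_t *count1)
+{
+	ii_ippr_u_t ippr;
+
+	ippr.ii_ippr_regval = *ippr_reg;
+	*count0 = ippr.ii_ippr_fld_s.i_ippr0;
+	*count1 = ippr.ii_ippr_fld_s.i_ippr1;
+}
+
+
+
+/**************************************************************************
+ *                                                                        *
+ * The following defines which were not formed into structures are       *
+ * probably identical to another register, and the name of the           *
+ * register is provided against each of these registers. 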
This * + * information needs to be checked carefully * + * * + * IIO_ICRB1_A IIO_ICRB0_A * + * IIO_ICRB1_B IIO_ICRB0_B * + * IIO_ICRB1_C IIO_ICRB0_C * + * IIO_ICRB1_D IIO_ICRB0_D * + * IIO_ICRB1_E IIO_ICRB0_E * + * IIO_ICRB2_A IIO_ICRB0_A * + * IIO_ICRB2_B IIO_ICRB0_B * + * IIO_ICRB2_C IIO_ICRB0_C * + * IIO_ICRB2_D IIO_ICRB0_D * + * IIO_ICRB2_E IIO_ICRB0_E * + * IIO_ICRB3_A IIO_ICRB0_A * + * IIO_ICRB3_B IIO_ICRB0_B * + * IIO_ICRB3_C IIO_ICRB0_C * + * IIO_ICRB3_D IIO_ICRB0_D * + * IIO_ICRB3_E IIO_ICRB0_E * + * IIO_ICRB4_A IIO_ICRB0_A * + * IIO_ICRB4_B IIO_ICRB0_B * + * IIO_ICRB4_C IIO_ICRB0_C * + * IIO_ICRB4_D IIO_ICRB0_D * + * IIO_ICRB4_E IIO_ICRB0_E * + * IIO_ICRB5_A IIO_ICRB0_A * + * IIO_ICRB5_B IIO_ICRB0_B * + * IIO_ICRB5_C IIO_ICRB0_C * + * IIO_ICRB5_D IIO_ICRB0_D * + * IIO_ICRB5_E IIO_ICRB0_E * + * IIO_ICRB6_A IIO_ICRB0_A * + * IIO_ICRB6_B IIO_ICRB0_B * + * IIO_ICRB6_C IIO_ICRB0_C * + * IIO_ICRB6_D IIO_ICRB0_D * + * IIO_ICRB6_E IIO_ICRB0_E * + * IIO_ICRB7_A IIO_ICRB0_A * + * IIO_ICRB7_B IIO_ICRB0_B * + * IIO_ICRB7_C IIO_ICRB0_C * + * IIO_ICRB7_D IIO_ICRB0_D * + * IIO_ICRB7_E IIO_ICRB0_E * + * IIO_ICRB8_A IIO_ICRB0_A * + * IIO_ICRB8_B IIO_ICRB0_B * + * IIO_ICRB8_C IIO_ICRB0_C * + * IIO_ICRB8_D IIO_ICRB0_D * + * IIO_ICRB8_E IIO_ICRB0_E * + * IIO_ICRB9_A IIO_ICRB0_A * + * IIO_ICRB9_B IIO_ICRB0_B * + * IIO_ICRB9_C IIO_ICRB0_C * + * IIO_ICRB9_D IIO_ICRB0_D * + * IIO_ICRB9_E IIO_ICRB0_E * + * IIO_ICRBA_A IIO_ICRB0_A * + * IIO_ICRBA_B IIO_ICRB0_B * + * IIO_ICRBA_C IIO_ICRB0_C * + * IIO_ICRBA_D IIO_ICRB0_D * + * IIO_ICRBA_E IIO_ICRB0_E * + * IIO_ICRBB_A IIO_ICRB0_A * + * IIO_ICRBB_B IIO_ICRB0_B * + * IIO_ICRBB_C IIO_ICRB0_C * + * IIO_ICRBB_D IIO_ICRB0_D * + * IIO_ICRBB_E IIO_ICRB0_E * + * IIO_ICRBC_A IIO_ICRB0_A * + * IIO_ICRBC_B IIO_ICRB0_B * + * IIO_ICRBC_C IIO_ICRB0_C * + * IIO_ICRBC_D IIO_ICRB0_D * + * IIO_ICRBC_E IIO_ICRB0_E * + * IIO_ICRBD_A IIO_ICRB0_A * + * IIO_ICRBD_B IIO_ICRB0_B * + * IIO_ICRBD_C IIO_ICRB0_C * + * IIO_ICRBD_D IIO_ICRB0_D * + * IIO_ICRBD_E IIO_ICRB0_E * + * IIO_ICRBE_A IIO_ICRB0_A * + * IIO_ICRBE_B IIO_ICRB0_B * + * IIO_ICRBE_C IIO_ICRB0_C * + * IIO_ICRBE_D IIO_ICRB0_D * + * IIO_ICRBE_E IIO_ICRB0_E * + * * + **************************************************************************/ + + +/* + * Slightly friendlier names for some common registers. 
+ */ +#define IIO_WIDGET IIO_WID /* Widget identification */ +#define IIO_WIDGET_STAT IIO_WSTAT /* Widget status register */ +#define IIO_WIDGET_CTRL IIO_WCR /* Widget control register */ +#define IIO_PROTECT IIO_ILAPR /* IO interface protection */ +#define IIO_PROTECT_OVRRD IIO_ILAPO /* IO protect override */ +#define IIO_OUTWIDGET_ACCESS IIO_IOWA /* Outbound widget access */ +#define IIO_INWIDGET_ACCESS IIO_IIWA /* Inbound widget access */ +#define IIO_INDEV_ERR_MASK IIO_IIDEM /* Inbound device error mask */ +#define IIO_LLP_CSR IIO_ILCSR /* LLP control and status */ +#define IIO_LLP_LOG IIO_ILLR /* LLP log */ +#define IIO_XTALKCC_TOUT IIO_IXCC /* Xtalk credit count timeout*/ +#define IIO_XTALKTT_TOUT IIO_IXTT /* Xtalk tail timeout */ +#define IIO_IO_ERR_CLR IIO_IECLR /* IO error clear */ +#define IIO_IGFX_0 IIO_IGFX0 +#define IIO_IGFX_1 IIO_IGFX1 +#define IIO_IBCT_0 IIO_IBCT0 +#define IIO_IBCT_1 IIO_IBCT1 +#define IIO_IBLS_0 IIO_IBLS0 +#define IIO_IBLS_1 IIO_IBLS1 +#define IIO_IBSA_0 IIO_IBSA0 +#define IIO_IBSA_1 IIO_IBSA1 +#define IIO_IBDA_0 IIO_IBDA0 +#define IIO_IBDA_1 IIO_IBDA1 +#define IIO_IBNA_0 IIO_IBNA0 +#define IIO_IBNA_1 IIO_IBNA1 +#define IIO_IBIA_0 IIO_IBIA0 +#define IIO_IBIA_1 IIO_IBIA1 +#define IIO_IOPRB_0 IIO_IPRB0 + +#define IIO_PRTE_A(_x) (IIO_IPRTE0_A + (8 * (_x))) +#define IIO_PRTE_B(_x) (IIO_IPRTE0_B + (8 * (_x))) +#define IIO_NUM_PRTES 8 /* Total number of PRB table entries */ +#define IIO_WIDPRTE_A(x) IIO_PRTE_A(((x) - 8)) /* widget ID to its PRTE num */ +#define IIO_WIDPRTE_B(x) IIO_PRTE_B(((x) - 8)) /* widget ID to its PRTE num */ + +#define IIO_NUM_IPRBS (9) + +#define IIO_LLP_CSR_IS_UP 0x00002000 +#define IIO_LLP_CSR_LLP_STAT_MASK 0x00003000 +#define IIO_LLP_CSR_LLP_STAT_SHFT 12 + +#define IIO_LLP_CB_MAX 0xffff /* in ILLR CB_CNT, Max Check Bit errors */ +#define IIO_LLP_SN_MAX 0xffff /* in ILLR SN_CNT, Max Sequence Number errors */ + +/* key to IIO_PROTECT_OVRRD */ +#define IIO_PROTECT_OVRRD_KEY 0x53474972756c6573ull /* "SGIrules" */ + +/* BTE register names */ +#define IIO_BTE_STAT_0 IIO_IBLS_0 /* Also BTE length/status 0 */ +#define IIO_BTE_SRC_0 IIO_IBSA_0 /* Also BTE source address 0 */ +#define IIO_BTE_DEST_0 IIO_IBDA_0 /* Also BTE dest. address 0 */ +#define IIO_BTE_CTRL_0 IIO_IBCT_0 /* Also BTE control/terminate 0 */ +#define IIO_BTE_NOTIFY_0 IIO_IBNA_0 /* Also BTE notification 0 */ +#define IIO_BTE_INT_0 IIO_IBIA_0 /* Also BTE interrupt 0 */ +#define IIO_BTE_OFF_0 0 /* Base offset from BTE 0 regs. */ +#define IIO_BTE_OFF_1 (IIO_IBLS_1 - IIO_IBLS_0) /* Offset from base to BTE 1 */ + +/* BTE register offsets from base */ +#define BTEOFF_STAT 0 +#define BTEOFF_SRC (IIO_BTE_SRC_0 - IIO_BTE_STAT_0) +#define BTEOFF_DEST (IIO_BTE_DEST_0 - IIO_BTE_STAT_0) +#define BTEOFF_CTRL (IIO_BTE_CTRL_0 - IIO_BTE_STAT_0) +#define BTEOFF_NOTIFY (IIO_BTE_NOTIFY_0 - IIO_BTE_STAT_0) +#define BTEOFF_INT (IIO_BTE_INT_0 - IIO_BTE_STAT_0) + + +/* names used in shub diags */ +#define IIO_BASE_BTE0 IIO_IBLS_0 +#define IIO_BASE_BTE1 IIO_IBLS_1 + +/* + * Macro which takes the widget number, and returns the + * IO PRB address of that widget. + * value _x is expected to be a widget number in the range + * 0, 8 - 0xF + */ +#define IIO_IOPRB(_x) (IIO_IOPRB_0 + ( ( (_x) < HUB_WIDGET_ID_MIN ? 
\ + (_x) : \ + (_x) - (HUB_WIDGET_ID_MIN-1)) << 3) ) + + +/* GFX Flow Control Node/Widget Register */ +#define IIO_IGFX_W_NUM_BITS 4 /* size of widget num field */ +#define IIO_IGFX_W_NUM_MASK ((1<<IIO_IGFX_W_NUM_BITS)-1) +#define IIO_IGFX_W_NUM_SHIFT 0 +#define IIO_IGFX_PI_NUM_BITS 1 /* size of PI num field */ +#define IIO_IGFX_PI_NUM_MASK ((1<<IIO_IGFX_PI_NUM_BITS)-1) +#define IIO_IGFX_PI_NUM_SHIFT 4 +#define IIO_IGFX_N_NUM_BITS 8 /* size of node num field */ +#define IIO_IGFX_N_NUM_MASK ((1<<IIO_IGFX_N_NUM_BITS)-1) +#define IIO_IGFX_N_NUM_SHIFT 5 +#define IIO_IGFX_P_NUM_BITS 1 /* size of processor num field */ +#define IIO_IGFX_P_NUM_MASK ((1<<IIO_IGFX_P_NUM_BITS)-1) +#define IIO_IGFX_P_NUM_SHIFT 16 +#define IIO_IGFX_INIT(widget, pi, node, cpu) (\ + (((widget) & IIO_IGFX_W_NUM_MASK) << IIO_IGFX_W_NUM_SHIFT) | \ + (((pi) & IIO_IGFX_PI_NUM_MASK)<< IIO_IGFX_PI_NUM_SHIFT)| \ + (((node) & IIO_IGFX_N_NUM_MASK) << IIO_IGFX_N_NUM_SHIFT) | \ + (((cpu) & IIO_IGFX_P_NUM_MASK) << IIO_IGFX_P_NUM_SHIFT)) + + +/* Scratch registers (all bits available) */ +#define IIO_SCRATCH_REG0 IIO_ISCR0 +#define IIO_SCRATCH_REG1 IIO_ISCR1 +#define IIO_SCRATCH_MASK 0xffffffffffffffffUL + +#define IIO_SCRATCH_BIT0_0 0x0000000000000001UL +#define IIO_SCRATCH_BIT0_1 0x0000000000000002UL +#define IIO_SCRATCH_BIT0_2 0x0000000000000004UL +#define IIO_SCRATCH_BIT0_3 0x0000000000000008UL +#define IIO_SCRATCH_BIT0_4 0x0000000000000010UL +#define IIO_SCRATCH_BIT0_5 0x0000000000000020UL +#define IIO_SCRATCH_BIT0_6 0x0000000000000040UL +#define IIO_SCRATCH_BIT0_7 0x0000000000000080UL +#define IIO_SCRATCH_BIT0_8 0x0000000000000100UL +#define IIO_SCRATCH_BIT0_9 0x0000000000000200UL +#define IIO_SCRATCH_BIT0_A 0x0000000000000400UL + +#define IIO_SCRATCH_BIT1_0 0x0000000000000001UL +#define IIO_SCRATCH_BIT1_1 0x0000000000000002UL +/* IO Translation Table Entries */ +#define IIO_NUM_ITTES 7 /* ITTEs numbered 0..6 */ + /* Hw manuals number them 1..7! */ +/* + * IIO_IMEM Register fields. + */ +#define IIO_IMEM_W0ESD 0x1UL /* Widget 0 shut down due to error */ +#define IIO_IMEM_B0ESD (1UL << 4) /* BTE 0 shut down due to error */ +#define IIO_IMEM_B1ESD (1UL << 8) /* BTE 1 Shut down due to error */ + +/* + * As a permanent workaround for a bug in the PI side of the shub, we've + * redefined big window 7 as small window 0. + XXX does this still apply for SN1?? + */ +#define HUB_NUM_BIG_WINDOW (IIO_NUM_ITTES - 1) + +/* + * Use the top big window as a surrogate for the first small window + */ +#define SWIN0_BIGWIN HUB_NUM_BIG_WINDOW + +#define ILCSR_WARM_RESET 0x100 + +/* + * CRB manipulation macros + * The CRB macros are slightly complicated, since there are up to + * four registers associated with each CRB entry. 
+ */
+#define IIO_NUM_CRBS 15 /* Number of CRBs */
+#define IIO_NUM_PC_CRBS 4 /* Number of partial cache CRBs */
+#define IIO_ICRB_OFFSET 8
+#define IIO_ICRB_0 IIO_ICRB0_A
+#define IIO_ICRB_ADDR_SHFT 2 /* Shift to get proper address */
+/* XXX - This is now tuneable:
+ #define IIO_FIRST_PC_ENTRY 12
+ */
+
+#define IIO_ICRB_A(_x) ((u64)(IIO_ICRB_0 + (6 * IIO_ICRB_OFFSET * (_x))))
+#define IIO_ICRB_B(_x) ((u64)((char *)IIO_ICRB_A(_x) + 1*IIO_ICRB_OFFSET))
+#define IIO_ICRB_C(_x) ((u64)((char *)IIO_ICRB_A(_x) + 2*IIO_ICRB_OFFSET))
+#define IIO_ICRB_D(_x) ((u64)((char *)IIO_ICRB_A(_x) + 3*IIO_ICRB_OFFSET))
+#define IIO_ICRB_E(_x) ((u64)((char *)IIO_ICRB_A(_x) + 4*IIO_ICRB_OFFSET))
+
+#define TNUM_TO_WIDGET_DEV(_tnum) (_tnum & 0x7)
+
+/*
+ * values for "ecode" field
+ */
+#define IIO_ICRB_ECODE_DERR 0 /* Directory error due to IIO access */
+#define IIO_ICRB_ECODE_PERR 1 /* Poison error on IO access */
+#define IIO_ICRB_ECODE_WERR 2 /* Write error by IIO access
+ * e.g. WINV to a Read only line. */
+#define IIO_ICRB_ECODE_AERR 3 /* Access error caused by IIO access */
+#define IIO_ICRB_ECODE_PWERR 4 /* Error on partial write */
+#define IIO_ICRB_ECODE_PRERR 5 /* Error on partial read */
+#define IIO_ICRB_ECODE_TOUT 6 /* CRB timeout before deallocating */
+#define IIO_ICRB_ECODE_XTERR 7 /* Incoming xtalk pkt had error bit */
+
+/*
+ * Values for field imsgtype
+ */
+#define IIO_ICRB_IMSGT_XTALK 0 /* Incoming message from Xtalk */
+#define IIO_ICRB_IMSGT_BTE 1 /* Incoming message from BTE */
+#define IIO_ICRB_IMSGT_SN1NET 2 /* Incoming message from SN1 net */
+#define IIO_ICRB_IMSGT_CRB 3 /* Incoming message from CRB ??? */
+
+/*
+ * values for field initiator.
+ */
+#define IIO_ICRB_INIT_XTALK 0 /* Message originated in xtalk */
+#define IIO_ICRB_INIT_BTE0 0x1 /* Message originated in BTE 0 */
+#define IIO_ICRB_INIT_SN1NET 0x2 /* Message originated in SN1net */
+#define IIO_ICRB_INIT_CRB 0x3 /* Message originated in CRB ? */
+#define IIO_ICRB_INIT_BTE1 0x5 /* Message originated in BTE 1 */
+
+/*
+ * Number of credits Hub widget has while sending req/response to
+ * xbow.
+ * Value of 3 is required by Xbow 1.1
+ * We may be able to increase this to 4 with Xbow 1.2.
+ */
+#define HUBII_XBOW_CREDIT 3
+#define HUBII_XBOW_REV2_CREDIT 4
+
+/*
+ * Number of credits that xtalk devices should use when communicating
+ * with a SHub (depth of SHub's queue).
+ */
+#define HUB_CREDIT 4
+
+/*
+ * Some IIO_PRB fields
+ */
+#define IIO_PRB_MULTI_ERR (1LL << 63)
+#define IIO_PRB_SPUR_RD (1LL << 51)
+#define IIO_PRB_SPUR_WR (1LL << 50)
+#define IIO_PRB_RD_TO (1LL << 49)
+#define IIO_PRB_ERROR (1LL << 48)
+
+/*************************************************************************
+
+ Some of the IIO field masks and shifts are defined here.
+ This is in order to maintain compatibility in SN0 and SN1 code
+
+**************************************************************************/
+
+/*
+ * ICMR register fields
+ * (Note: the IIO_ICMR_P_CNT and IIO_ICMR_PC_VLD from Hub are not
+ * present in SHub)
+ */
+
+#define IIO_ICMR_CRB_VLD_SHFT 20
+#define IIO_ICMR_CRB_VLD_MASK (0x7fffUL << IIO_ICMR_CRB_VLD_SHFT)
+
+#define IIO_ICMR_FC_CNT_SHFT 16
+#define IIO_ICMR_FC_CNT_MASK (0xf << IIO_ICMR_FC_CNT_SHFT)
+
+#define IIO_ICMR_C_CNT_SHFT 4
+#define IIO_ICMR_C_CNT_MASK (0xf << IIO_ICMR_C_CNT_SHFT)
+
+#define IIO_ICMR_PRECISE (1UL << 52)
+#define IIO_ICMR_CLR_RPPD (1UL << 13)
+#define IIO_ICMR_CLR_RQPD (1UL << 12)
+
+/*
+ * IIO PIO Deallocation register field masks : (IIO_IPDR)
+ XXX present but not needed in bedrock?
See the manual. + */ +#define IIO_IPDR_PND (1 << 4) + +/* + * IIO CRB deallocation register field masks: (IIO_ICDR) + */ +#define IIO_ICDR_PND (1 << 4) + +/* + * IO BTE Length/Status (IIO_IBLS) register bit field definitions + */ +#define IBLS_BUSY (0x1UL << 20) +#define IBLS_ERROR_SHFT 16 +#define IBLS_ERROR (0x1UL << IBLS_ERROR_SHFT) +#define IBLS_LENGTH_MASK 0xffff + +/* + * IO BTE Control/Terminate register (IBCT) register bit field definitions + */ +#define IBCT_POISON (0x1UL << 8) +#define IBCT_NOTIFY (0x1UL << 4) +#define IBCT_ZFIL_MODE (0x1UL << 0) + +/* + * IIO Incoming Error Packet Header (IIO_IIEPH1/IIO_IIEPH2) + */ +#define IIEPH1_VALID (1UL << 44) +#define IIEPH1_OVERRUN (1UL << 40) +#define IIEPH1_ERR_TYPE_SHFT 32 +#define IIEPH1_ERR_TYPE_MASK 0xf +#define IIEPH1_SOURCE_SHFT 20 +#define IIEPH1_SOURCE_MASK 11 +#define IIEPH1_SUPPL_SHFT 8 +#define IIEPH1_SUPPL_MASK 11 +#define IIEPH1_CMD_SHFT 0 +#define IIEPH1_CMD_MASK 7 + +#define IIEPH2_TAIL (1UL << 40) +#define IIEPH2_ADDRESS_SHFT 0 +#define IIEPH2_ADDRESS_MASK 38 + +#define IIEPH1_ERR_SHORT_REQ 2 +#define IIEPH1_ERR_SHORT_REPLY 3 +#define IIEPH1_ERR_LONG_REQ 4 +#define IIEPH1_ERR_LONG_REPLY 5 + +/* + * IO Error Clear register bit field definitions + */ +#define IECLR_PI1_FWD_INT (1UL << 31) /* clear PI1_FORWARD_INT in iidsr */ +#define IECLR_PI0_FWD_INT (1UL << 30) /* clear PI0_FORWARD_INT in iidsr */ +#define IECLR_SPUR_RD_HDR (1UL << 29) /* clear valid bit in ixss reg */ +#define IECLR_BTE1 (1UL << 18) /* clear bte error 1 */ +#define IECLR_BTE0 (1UL << 17) /* clear bte error 0 */ +#define IECLR_CRAZY (1UL << 16) /* clear crazy bit in wstat reg */ +#define IECLR_PRB_F (1UL << 15) /* clear err bit in PRB_F reg */ +#define IECLR_PRB_E (1UL << 14) /* clear err bit in PRB_E reg */ +#define IECLR_PRB_D (1UL << 13) /* clear err bit in PRB_D reg */ +#define IECLR_PRB_C (1UL << 12) /* clear err bit in PRB_C reg */ +#define IECLR_PRB_B (1UL << 11) /* clear err bit in PRB_B reg */ +#define IECLR_PRB_A (1UL << 10) /* clear err bit in PRB_A reg */ +#define IECLR_PRB_9 (1UL << 9) /* clear err bit in PRB_9 reg */ +#define IECLR_PRB_8 (1UL << 8) /* clear err bit in PRB_8 reg */ +#define IECLR_PRB_0 (1UL << 0) /* clear err bit in PRB_0 reg */ + +/* + * IIO CRB control register Fields: IIO_ICCR + */ +#define IIO_ICCR_PENDING (0x10000) +#define IIO_ICCR_CMD_MASK (0xFF) +#define IIO_ICCR_CMD_SHFT (7) +#define IIO_ICCR_CMD_NOP (0x0) /* No Op */ +#define IIO_ICCR_CMD_WAKE (0x100) /* Reactivate CRB entry and process */ +#define IIO_ICCR_CMD_TIMEOUT (0x200) /* Make CRB timeout & mark invalid */ +#define IIO_ICCR_CMD_EJECT (0x400) /* Contents of entry written to memory + * via a WB + */ +#define IIO_ICCR_CMD_FLUSH (0x800) + +/* + * + * CRB Register description. + * + * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING + * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING + * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING + * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING + * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING * WARNING + * + * Many of the fields in CRB are status bits used by hardware + * for implementation of the protocol. It's very dangerous to + * mess around with the CRB registers. + * + * It's OK to read the CRB registers and try to make sense out of the + * fields in CRB. + * + * Updating CRB requires all activities in Hub IIO to be quiesced. + * otherwise, a write to CRB could corrupt other CRB entries. 
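+ *
+ * A read-only peek is the safe use. As an illustrative sketch (the
+ * MMIO read accessor is an assumed helper; any REMOTE_HUB_L()-style
+ * routine would do), using the icrba_t typedef and field macros
+ * defined just below:
+ *
+ *     icrba_t crb;
+ *     crb.a_regvalue = read_iio_reg(IIO_ICRB_A(i));
+ *     if (crb.a_valid)
+ *             ... decode crb.a_addr, crb.a_tnum ...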
+ * CRBs are here only as a back door peek to shub IIO's status. + * Quiescing implies no dmas no PIOs + * either directly from the cpu or from sn0net. + * this is not something that can be done easily. So, AVOID updating + * CRBs. + */ + +/* + * Easy access macros for CRBs, all 5 registers (A-E) + */ +typedef ii_icrb0_a_u_t icrba_t; +#define a_sidn ii_icrb0_a_fld_s.ia_sidn +#define a_tnum ii_icrb0_a_fld_s.ia_tnum +#define a_addr ii_icrb0_a_fld_s.ia_addr +#define a_valid ii_icrb0_a_fld_s.ia_vld +#define a_iow ii_icrb0_a_fld_s.ia_iow +#define a_regvalue ii_icrb0_a_regval + +typedef ii_icrb0_b_u_t icrbb_t; +#define b_use_old ii_icrb0_b_fld_s.ib_use_old +#define b_imsgtype ii_icrb0_b_fld_s.ib_imsgtype +#define b_imsg ii_icrb0_b_fld_s.ib_imsg +#define b_initiator ii_icrb0_b_fld_s.ib_init +#define b_exc ii_icrb0_b_fld_s.ib_exc +#define b_ackcnt ii_icrb0_b_fld_s.ib_ack_cnt +#define b_resp ii_icrb0_b_fld_s.ib_resp +#define b_ack ii_icrb0_b_fld_s.ib_ack +#define b_hold ii_icrb0_b_fld_s.ib_hold +#define b_wb ii_icrb0_b_fld_s.ib_wb +#define b_intvn ii_icrb0_b_fld_s.ib_intvn +#define b_stall_ib ii_icrb0_b_fld_s.ib_stall_ib +#define b_stall_int ii_icrb0_b_fld_s.ib_stall__intr +#define b_stall_bte_0 ii_icrb0_b_fld_s.ib_stall__bte_0 +#define b_stall_bte_1 ii_icrb0_b_fld_s.ib_stall__bte_1 +#define b_error ii_icrb0_b_fld_s.ib_error +#define b_ecode ii_icrb0_b_fld_s.ib_errcode +#define b_lnetuce ii_icrb0_b_fld_s.ib_ln_uce +#define b_mark ii_icrb0_b_fld_s.ib_mark +#define b_xerr ii_icrb0_b_fld_s.ib_xt_err +#define b_regvalue ii_icrb0_b_regval + +typedef ii_icrb0_c_u_t icrbc_t; +#define c_suppl ii_icrb0_c_fld_s.ic_suppl +#define c_barrop ii_icrb0_c_fld_s.ic_bo +#define c_doresp ii_icrb0_c_fld_s.ic_resprqd +#define c_gbr ii_icrb0_c_fld_s.ic_gbr +#define c_btenum ii_icrb0_c_fld_s.ic_bte_num +#define c_cohtrans ii_icrb0_c_fld_s.ic_ct +#define c_xtsize ii_icrb0_c_fld_s.ic_size +#define c_source ii_icrb0_c_fld_s.ic_source +#define c_regvalue ii_icrb0_c_regval + + +typedef ii_icrb0_d_u_t icrbd_t; +#define d_sleep ii_icrb0_d_fld_s.id_sleep +#define d_pricnt ii_icrb0_d_fld_s.id_pr_cnt +#define d_pripsc ii_icrb0_d_fld_s.id_pr_psc +#define d_bteop ii_icrb0_d_fld_s.id_bte_op +#define d_bteaddr ii_icrb0_d_fld_s.id_pa_be /* ic_pa_be fld has 2 names*/ +#define d_benable ii_icrb0_d_fld_s.id_pa_be /* ic_pa_be fld has 2 names*/ +#define d_regvalue ii_icrb0_d_regval + +typedef ii_icrb0_e_u_t icrbe_t; +#define icrbe_ctxtvld ii_icrb0_e_fld_s.ie_cvld +#define icrbe_toutvld ii_icrb0_e_fld_s.ie_tvld +#define icrbe_context ii_icrb0_e_fld_s.ie_context +#define icrbe_timeout ii_icrb0_e_fld_s.ie_timeout +#define e_regvalue ii_icrb0_e_regval + + +/* Number of widgets supported by shub */ +#define HUB_NUM_WIDGET 9 +#define HUB_WIDGET_ID_MIN 0x8 +#define HUB_WIDGET_ID_MAX 0xf + +#define HUB_WIDGET_PART_NUM 0xc120 +#define MAX_HUBS_PER_XBOW 2 + +/* A few more #defines for backwards compatibility */ +#define iprb_t ii_iprb0_u_t +#define iprb_regval ii_iprb0_regval +#define iprb_mult_err ii_iprb0_fld_s.i_mult_err +#define iprb_spur_rd ii_iprb0_fld_s.i_spur_rd +#define iprb_spur_wr ii_iprb0_fld_s.i_spur_wr +#define iprb_rd_to ii_iprb0_fld_s.i_rd_to +#define iprb_ovflow ii_iprb0_fld_s.i_of_cnt +#define iprb_error ii_iprb0_fld_s.i_error +#define iprb_ff ii_iprb0_fld_s.i_f +#define iprb_mode ii_iprb0_fld_s.i_m +#define iprb_bnakctr ii_iprb0_fld_s.i_nb +#define iprb_anakctr ii_iprb0_fld_s.i_na +#define iprb_xtalkctr ii_iprb0_fld_s.i_c + +#define LNK_STAT_WORKING 0x2 /* LLP is working */ + +#define IIO_WSTAT_ECRAZY (1ULL << 32) /* Hub gone crazy 
*/
+#define IIO_WSTAT_TXRETRY (1ULL << 9) /* Hub Tx Retry timeout */
+#define IIO_WSTAT_TXRETRY_MASK (0x7F) /* should be 0xFF?? */
+#define IIO_WSTAT_TXRETRY_SHFT (16)
+#define IIO_WSTAT_TXRETRY_CNT(w) (((w) >> IIO_WSTAT_TXRETRY_SHFT) & \
+ IIO_WSTAT_TXRETRY_MASK)
+
+/* Number of II perf. counters we can multiplex at once */
+
+#define IO_PERF_SETS 32
+
+/* Bit for the widget in inbound access register */
+#define IIO_IIWA_WIDGET(_w) ((uint64_t)(1ULL << _w))
+/* Bit for the widget in outbound access register */
+#define IIO_IOWA_WIDGET(_w) ((uint64_t)(1ULL << _w))
+
+/* NOTE: The following define assumes that we are going to get
+ * widget numbers from 8 thru F and the device numbers within
+ * widget from 0 thru 7.
+ */
+#define IIO_IIDEM_WIDGETDEV_MASK(w, d) ((uint64_t)(1ULL << (8 * ((w) - 8) + (d))))
+
+/* IO Interrupt Destination Register */
+#define IIO_IIDSR_SENT_SHIFT 28
+#define IIO_IIDSR_SENT_MASK 0x30000000
+#define IIO_IIDSR_ENB_SHIFT 24
+#define IIO_IIDSR_ENB_MASK 0x01000000
+#define IIO_IIDSR_NODE_SHIFT 9
+#define IIO_IIDSR_NODE_MASK 0x000ff700
+#define IIO_IIDSR_PI_ID_SHIFT 8
+#define IIO_IIDSR_PI_ID_MASK 0x00000100
+#define IIO_IIDSR_LVL_SHIFT 0
+#define IIO_IIDSR_LVL_MASK 0x000000ff
+
+/* Xtalk timeout threshold register (IIO_IXTT) */
+#define IXTT_RRSP_TO_SHFT 55 /* read response timeout */
+#define IXTT_RRSP_TO_MASK (0x1FULL << IXTT_RRSP_TO_SHFT)
+#define IXTT_RRSP_PS_SHFT 32 /* read response TO prescaler */
+#define IXTT_RRSP_PS_MASK (0x7FFFFFULL << IXTT_RRSP_PS_SHFT)
+#define IXTT_TAIL_TO_SHFT 0 /* tail timeout counter threshold */
+#define IXTT_TAIL_TO_MASK (0x3FFFFFFULL << IXTT_TAIL_TO_SHFT)
+
+/*
+ * The IO LLP control status register and widget control register
+ */
+
+typedef union hubii_wcr_u {
+ uint64_t wcr_reg_value;
+ struct {
+ uint64_t wcr_widget_id: 4, /* widget id */
+ wcr_tag_mode: 1, /* Tag mode */
+ wcr_rsvd1: 8, /* Reserved */
+ wcr_xbar_crd: 3, /* LLP crossbar credit */
+ wcr_f_bad_pkt: 1, /* Force bad llp pkt enable */
+ wcr_dir_con: 1, /* widget direct connect */
+ wcr_e_thresh: 5, /* elasticity threshold */
+ wcr_rsvd: 41; /* unused */
+ } wcr_fields_s;
+} hubii_wcr_t;
+
+#define iwcr_dir_con wcr_fields_s.wcr_dir_con
+
+/* The structures below are defined to extract and modify the ii
+performance registers */
+
+/* io_perf_sel allows the caller to specify what tests will be
+ performed */
+
+typedef union io_perf_sel {
+ uint64_t perf_sel_reg;
+ struct {
+ uint64_t perf_ippr0 : 4,
+ perf_ippr1 : 4,
+ perf_icct : 8,
+ perf_rsvd : 48;
+ } perf_sel_bits;
+} io_perf_sel_t;
+
+/* io_perf_cnt is to extract the count from the shub registers. Due to
+ hardware problems there is only one counter, not two. */
+
+typedef union io_perf_cnt {
+ uint64_t perf_cnt;
+ struct {
+ uint64_t perf_cnt : 20,
+ perf_rsvd2 : 12,
+ perf_rsvd1 : 32;
+ } perf_cnt_bits;
+
+} io_perf_cnt_t;
+
+typedef union iprte_a {
+ uint64_t entry;
+ struct {
+ uint64_t i_rsvd_1 : 3;
+ uint64_t i_addr : 38;
+ uint64_t i_init : 3;
+ uint64_t i_source : 8;
+ uint64_t i_rsvd : 2;
+ uint64_t i_widget : 4;
+ uint64_t i_to_cnt : 5;
+ uint64_t i_vld : 1;
+ } iprte_fields;
+} iprte_a_t;
+
+#endif /* _ASM_IA64_SN_SHUBIO_H */
+
diff --git a/include/asm-ia64/sn/simulator.h b/include/asm-ia64/sn/simulator.h
new file mode 100644
index 000000000000..78eb4f869c8b
--- /dev/null
+++ b/include/asm-ia64/sn/simulator.h
@@ -0,0 +1,27 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.
See the file "COPYING" in the main directory of this archive + * for more details. + * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved. + */ + +#ifndef _ASM_IA64_SN_SIMULATOR_H +#define _ASM_IA64_SN_SIMULATOR_H + +#include <linux/config.h> + +#ifdef CONFIG_IA64_SGI_SN_SIM + +#define SNMAGIC 0xaeeeeeee8badbeefL +#define IS_RUNNING_ON_SIMULATOR() ({long sn; asm("mov %0=cpuid[%1]" : "=r"(sn) : "r"(2)); sn == SNMAGIC;}) + +#define SIMULATOR_SLEEP() asm("nop.i 0x8beef") + +#else + +#define IS_RUNNING_ON_SIMULATOR() (0) +#define SIMULATOR_SLEEP() + +#endif + +#endif /* _ASM_IA64_SN_SIMULATOR_H */ diff --git a/include/asm-ia64/sn/sn2/sn_hwperf.h b/include/asm-ia64/sn/sn2/sn_hwperf.h new file mode 100644 index 000000000000..b0c4d6dd77ba --- /dev/null +++ b/include/asm-ia64/sn/sn2/sn_hwperf.h @@ -0,0 +1,226 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2004 Silicon Graphics, Inc. All rights reserved. + * + * Data types used by the SN_SAL_HWPERF_OP SAL call for monitoring + * SGI Altix node and router hardware + * + * Mark Goodwin <markgw@sgi.com> Mon Aug 30 12:23:46 EST 2004 + */ + +#ifndef SN_HWPERF_H +#define SN_HWPERF_H + +/* + * object structure. SN_HWPERF_ENUM_OBJECTS and SN_HWPERF_GET_CPU_INFO + * return an array of these. Do not change this without also + * changing the corresponding SAL code. + */ +#define SN_HWPERF_MAXSTRING 128 +struct sn_hwperf_object_info { + u32 id; + union { + struct { + u64 this_part:1; + u64 is_shared:1; + } fields; + struct { + u64 flags; + u64 reserved; + } b; + } f; + char name[SN_HWPERF_MAXSTRING]; + char location[SN_HWPERF_MAXSTRING]; + u32 ports; +}; + +#define sn_hwp_this_part f.fields.this_part +#define sn_hwp_is_shared f.fields.is_shared +#define sn_hwp_flags f.b.flags + +/* macros for object classification */ +#define SN_HWPERF_IS_NODE(x) ((x) && strstr((x)->name, "SHub")) +#define SN_HWPERF_IS_IONODE(x) ((x) && strstr((x)->name, "TIO")) +#define SN_HWPERF_IS_ROUTER(x) ((x) && strstr((x)->name, "Router")) +#define SN_HWPERF_IS_NL3ROUTER(x) ((x) && strstr((x)->name, "NL3Router")) +#define SN_HWPERF_FOREIGN(x) ((x) && !(x)->sn_hwp_this_part && !(x)->sn_hwp_is_shared) +#define SN_HWPERF_SAME_OBJTYPE(x,y) ((SN_HWPERF_IS_NODE(x) && SN_HWPERF_IS_NODE(y)) ||\ + (SN_HWPERF_IS_IONODE(x) && SN_HWPERF_IS_IONODE(y)) ||\ + (SN_HWPERF_IS_ROUTER(x) && SN_HWPERF_IS_ROUTER(y))) + +/* numa port structure, SN_HWPERF_ENUM_PORTS returns an array of these */ +struct sn_hwperf_port_info { + u32 port; + u32 conn_id; + u32 conn_port; +}; + +/* for HWPERF_{GET,SET}_MMRS */ +struct sn_hwperf_data { + u64 addr; + u64 data; +}; + +/* user ioctl() argument, see below */ +struct sn_hwperf_ioctl_args { + u64 arg; /* argument, usually an object id */ + u64 sz; /* size of transfer */ + void *ptr; /* pointer to source/target */ + u32 v0; /* second return value */ +}; + +/* + * For SN_HWPERF_{GET,SET}_MMRS and SN_HWPERF_OBJECT_DISTANCE, + * sn_hwperf_ioctl_args.arg can be used to specify a CPU on which + * to call SAL, and whether to use an interprocessor interrupt + * or task migration in order to do so. If the CPU specified is + * SN_HWPERF_ARG_ANY_CPU, then the current CPU will be used. 
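+ *
+ * As an illustrative sketch, an arg value that selects logical cpu 3
+ * via IPI against object id 7 would be assembled from the masks
+ * defined just below:
+ *
+ *     u64 arg = SN_HWPERF_ARG_USE_IPI_MASK
+ *             | (3UL << 32)
+ *             | (7 & SN_HWPERF_ARG_OBJID_MASK);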
+ */
+#define SN_HWPERF_ARG_ANY_CPU 0x7fffffffUL
+#define SN_HWPERF_ARG_CPU_MASK 0x7fffffff00000000ULL
+#define SN_HWPERF_ARG_USE_IPI_MASK 0x8000000000000000ULL
+#define SN_HWPERF_ARG_OBJID_MASK 0x00000000ffffffffULL
+
+/*
+ * ioctl requests on the "sn_hwperf" misc device that call SAL.
+ */
+#define SN_HWPERF_OP_MEM_COPYIN 0x1000
+#define SN_HWPERF_OP_MEM_COPYOUT 0x2000
+#define SN_HWPERF_OP_MASK 0x0fff
+
+/*
+ * Determine mem requirement.
+ * arg don't care
+ * sz 8
+ * p pointer to u64 integer
+ */
+#define SN_HWPERF_GET_HEAPSIZE 1
+
+/*
+ * Install mem for SAL drvr
+ * arg don't care
+ * sz sizeof buffer pointed to by p
+ * p pointer to buffer for scratch area
+ */
+#define SN_HWPERF_INSTALL_HEAP 2
+
+/*
+ * Determine number of objects
+ * arg don't care
+ * sz 8
+ * p pointer to u64 integer
+ */
+#define SN_HWPERF_OBJECT_COUNT (10|SN_HWPERF_OP_MEM_COPYOUT)
+
+/*
+ * Determine object "distance", relative to a cpu. This operation can
+ * execute on a designated logical cpu number, using either an IPI or
+ * via task migration. If the cpu number is SN_HWPERF_ANY_CPU, then
+ * the current CPU is used. See the SN_HWPERF_ARG_* macros above.
+ *
+ * arg bitmap of IPI flag, cpu number and object id
+ * sz 8
+ * p pointer to u64 integer
+ */
+#define SN_HWPERF_OBJECT_DISTANCE (11|SN_HWPERF_OP_MEM_COPYOUT)
+
+/*
+ * Enumerate objects. Special case if sz == 8, returns the required
+ * buffer size.
+ * arg don't care
+ * sz sizeof buffer pointed to by p
+ * p pointer to array of struct sn_hwperf_object_info
+ */
+#define SN_HWPERF_ENUM_OBJECTS (12|SN_HWPERF_OP_MEM_COPYOUT)
+
+/*
+ * Enumerate NumaLink ports for an object. Special case if sz == 8,
+ * returns the required buffer size.
+ * arg object id
+ * sz sizeof buffer pointed to by p
+ * p pointer to array of struct sn_hwperf_port_info
+ */
+#define SN_HWPERF_ENUM_PORTS (13|SN_HWPERF_OP_MEM_COPYOUT)
+
+/*
+ * SET/GET memory mapped registers. These operations can execute
+ * on a designated logical cpu number, using either an IPI or via
+ * task migration. If the cpu number is SN_HWPERF_ANY_CPU, then
+ * the current CPU is used. See the SN_HWPERF_ARG_* macros above.
+ *
+ * arg bitmap of ipi flag, cpu number and object id
+ * sz sizeof buffer pointed to by p
+ * p pointer to array of struct sn_hwperf_data
+ */
+#define SN_HWPERF_SET_MMRS (14|SN_HWPERF_OP_MEM_COPYIN)
+#define SN_HWPERF_GET_MMRS (15|SN_HWPERF_OP_MEM_COPYOUT| \
+ SN_HWPERF_OP_MEM_COPYIN)
+/*
+ * Lock a shared object
+ * arg object id
+ * sz don't care
+ * p don't care
+ */
+#define SN_HWPERF_ACQUIRE 16
+
+/*
+ * Unlock a shared object
+ * arg object id
+ * sz don't care
+ * p don't care
+ */
+#define SN_HWPERF_RELEASE 17
+
+/*
+ * Break a lock on a shared object
+ * arg object id
+ * sz don't care
+ * p don't care
+ */
+#define SN_HWPERF_FORCE_RELEASE 18
+
+/*
+ * ioctl requests on "sn_hwperf" that do not call SAL
+ */
+
+/*
+ * get cpu info as an array of hwperf_object_info_t.
+ * id is logical CPU number, name is description, location
+ * is geoid (e.g. 001c04#1c). Special case if sz == 8,
+ * returns the required buffer size.
+ *
+ * arg don't care
+ * sz sizeof buffer pointed to by p
+ * p pointer to array of struct sn_hwperf_object_info
+ */
+#define SN_HWPERF_GET_CPU_INFO (100|SN_HWPERF_OP_MEM_COPYOUT)
+
+/*
+ * Given an object id, return its node number (aka cnode).
+ * arg object id
+ * sz 8
+ * p pointer to u64 integer
+ */
+#define SN_HWPERF_GET_OBJ_NODE (101|SN_HWPERF_OP_MEM_COPYOUT)
+
+/*
+ * Given a node number (cnode), return its nasid.
+ * arg ordinal node number (aka cnodeid)
+ * sz 8
+ * p pointer to u64 integer
+ */
+#define SN_HWPERF_GET_NODE_NASID (102|SN_HWPERF_OP_MEM_COPYOUT)
+
+/* return codes */
+#define SN_HWPERF_OP_OK 0
+#define SN_HWPERF_OP_NOMEM 1
+#define SN_HWPERF_OP_NO_PERM 2
+#define SN_HWPERF_OP_IO_ERROR 3
+#define SN_HWPERF_OP_BUSY 4
+#define SN_HWPERF_OP_RECONFIGURE 253
+#define SN_HWPERF_OP_INVAL 254
+
+#endif /* SN_HWPERF_H */
diff --git a/include/asm-ia64/sn/sn_cpuid.h b/include/asm-ia64/sn/sn_cpuid.h
new file mode 100644
index 000000000000..685435af170d
--- /dev/null
+++ b/include/asm-ia64/sn/sn_cpuid.h
@@ -0,0 +1,144 @@
+/*
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
+ */
+
+
+#ifndef _ASM_IA64_SN_SN_CPUID_H
+#define _ASM_IA64_SN_SN_CPUID_H
+
+#include <linux/config.h>
+#include <linux/smp.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/pda.h>
+#include <asm/intrinsics.h>
+
+
+/*
+ * Functions for converting between cpuids, nodeids and NASIDs.
+ *
+ * These are for SGI platforms only.
+ *
+ */
+
+
+
+
+/*
+ * Definitions of terms (these definitions are for IA64 ONLY. Other architectures
+ * use cpuid/cpunum quite differently):
+ *
+ * CPUID - a number in range of 0..NR_CPUS-1 that uniquely identifies
+ * the cpu. The value cpuid has no significance on IA64 other than
+ * the boot cpu is 0.
+ * smp_processor_id() returns the cpuid of the current cpu.
+ *
+ * CPU_PHYSICAL_ID (also known as HARD_PROCESSOR_ID)
+ * This is the same as 31:24 of the processor LID register
+ * hard_smp_processor_id()- cpu_physical_id of current processor
+ * cpu_physical_id(cpuid) - convert a <cpuid> to a <physical_cpuid>
+ * cpu_logical_id(phy_id) - convert a <physical_cpuid> to a <cpuid>
+ * * not real efficient - don't use in perf critical code
+ *
+ * SLICE - a number in the range of 0 - 3 (typically) that represents the
+ * cpu number on a brick.
+ *
+ * SUBNODE - (almost obsolete) the number of the FSB that a cpu is
+ * connected to. This is also the same as the PI number. Usually 0 or 1.
+ *
+ * NOTE!!!: the value of the bits in the cpu physical id (SAPICid or LID) of a cpu has no
+ * significance. The SAPIC id (LID) is a 16-bit cookie that has meaning only to the PROM.
+ *
+ *
+ * The macros convert between cpu physical ids & slice/nasid/cnodeid.
+ * These terms are described below: + * + * + * Brick + * ----- ----- ----- ----- CPU + * | 0 | | 1 | | 0 | | 1 | SLICE + * ----- ----- ----- ----- + * | | | | + * | | | | + * 0 | | 2 0 | | 2 FSB SLOT + * ------- ------- + * | | + * | | + * | | + * ------------ ------------- + * | | | | + * | SHUB | | SHUB | NASID (0..MAX_NASIDS) + * | |----- | | CNODEID (0..num_compact_nodes-1) + * | | | | + * | | | | + * ------------ ------------- + * | | + * + * + */ + +#ifndef CONFIG_SMP +#define cpu_physical_id(cpuid) ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff) +#endif + + +#define get_node_number(addr) NASID_GET(addr) + +/* + * NOTE: on non-MP systems, only cpuid 0 exists + */ + +extern short physical_node_map[]; /* indexed by nasid to get cnode */ + +/* + * Macros for retrieving info about current cpu + */ +#define get_nasid() (nodepda->phys_cpuid[smp_processor_id()].nasid) +#define get_subnode() (nodepda->phys_cpuid[smp_processor_id()].subnode) +#define get_slice() (nodepda->phys_cpuid[smp_processor_id()].slice) +#define get_cnode() (nodepda->phys_cpuid[smp_processor_id()].cnode) +#define get_sapicid() ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff) + +/* + * Macros for retrieving info about an arbitrary cpu + * cpuid - logical cpu id + */ +#define cpuid_to_nasid(cpuid) (nodepda->phys_cpuid[cpuid].nasid) +#define cpuid_to_subnode(cpuid) (nodepda->phys_cpuid[cpuid].subnode) +#define cpuid_to_slice(cpuid) (nodepda->phys_cpuid[cpuid].slice) +#define cpuid_to_cnodeid(cpuid) (physical_node_map[cpuid_to_nasid(cpuid)]) + + +/* + * Dont use the following in performance critical code. They require scans + * of potentially large tables. + */ +extern int nasid_slice_to_cpuid(int, int); +#define nasid_slice_to_cpu_physical_id(nasid, slice) \ + cpu_physical_id(nasid_slice_to_cpuid(nasid, slice)) + +/* + * cnodeid_to_nasid - convert a cnodeid to a NASID + * Macro relies on pg_data for a node being on the node itself. + * Just extract the NASID from the pointer. + * + */ +#define cnodeid_to_nasid(cnodeid) pda->cnodeid_to_nasid_table[cnodeid] + +/* + * nasid_to_cnodeid - convert a NASID to a cnodeid + */ +#define nasid_to_cnodeid(nasid) (physical_node_map[nasid]) + +/* + * partition_coherence_id - get the coherence ID of the current partition + */ +extern u8 sn_coherency_id; +#define partition_coherence_id() (sn_coherency_id) + +#endif /* _ASM_IA64_SN_SN_CPUID_H */ + diff --git a/include/asm-ia64/sn/sn_fru.h b/include/asm-ia64/sn/sn_fru.h new file mode 100644 index 000000000000..8c21ac3f0156 --- /dev/null +++ b/include/asm-ia64/sn/sn_fru.h @@ -0,0 +1,44 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1992-1997,1999-2004 Silicon Graphics, Inc. All rights reserved. + */ +#ifndef _ASM_IA64_SN_SN_FRU_H +#define _ASM_IA64_SN_SN_FRU_H + +#define MAX_DIMMS 8 /* max # of dimm banks */ +#define MAX_PCIDEV 8 /* max # of pci devices on a pci bus */ + +typedef unsigned char confidence_t; + +typedef struct kf_mem_s { + confidence_t km_confidence; /* confidence level that the memory is bad + * is this necessary ? + */ + confidence_t km_dimm[MAX_DIMMS]; + /* confidence level that dimm[i] is bad + *I think this is the right number + */ + +} kf_mem_t; + +typedef struct kf_cpu_s { + confidence_t kc_confidence; /* confidence level that cpu is bad */ + confidence_t kc_icache; /* confidence level that instr. 
cache is bad */ + confidence_t kc_dcache; /* confidence level that data cache is bad */ + confidence_t kc_scache; /* confidence level that sec. cache is bad */ + confidence_t kc_sysbus; /* confidence level that sysad/cmd/state bus is bad */ +} kf_cpu_t; + + +typedef struct kf_pci_bus_s { + confidence_t kpb_belief; /* confidence level that the pci bus is bad */ + confidence_t kpb_pcidev_belief[MAX_PCIDEV]; + /* confidence level that the pci dev is bad */ +} kf_pci_bus_t; + + +#endif /* _ASM_IA64_SN_SN_FRU_H */ + diff --git a/include/asm-ia64/sn/sn_sal.h b/include/asm-ia64/sn/sn_sal.h new file mode 100644 index 000000000000..88c31b53dc09 --- /dev/null +++ b/include/asm-ia64/sn/sn_sal.h @@ -0,0 +1,1015 @@ +#ifndef _ASM_IA64_SN_SN_SAL_H +#define _ASM_IA64_SN_SN_SAL_H + +/* + * System Abstraction Layer definitions for IA64 + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2000-2004 Silicon Graphics, Inc. All rights reserved. + */ + + +#include <linux/config.h> +#include <asm/sal.h> +#include <asm/sn/sn_cpuid.h> +#include <asm/sn/arch.h> +#include <asm/sn/geo.h> +#include <asm/sn/nodepda.h> +#include <asm/sn/shub_mmr.h> + +// SGI Specific Calls +#define SN_SAL_POD_MODE 0x02000001 +#define SN_SAL_SYSTEM_RESET 0x02000002 +#define SN_SAL_PROBE 0x02000003 +#define SN_SAL_GET_MASTER_NASID 0x02000004 +#define SN_SAL_GET_KLCONFIG_ADDR 0x02000005 +#define SN_SAL_LOG_CE 0x02000006 +#define SN_SAL_REGISTER_CE 0x02000007 +#define SN_SAL_GET_PARTITION_ADDR 0x02000009 +#define SN_SAL_XP_ADDR_REGION 0x0200000f +#define SN_SAL_NO_FAULT_ZONE_VIRTUAL 0x02000010 +#define SN_SAL_NO_FAULT_ZONE_PHYSICAL 0x02000011 +#define SN_SAL_PRINT_ERROR 0x02000012 +#define SN_SAL_SET_ERROR_HANDLING_FEATURES 0x0200001a // reentrant +#define SN_SAL_GET_FIT_COMPT 0x0200001b // reentrant +#define SN_SAL_GET_SN_INFO 0x0200001c +#define SN_SAL_GET_SAPIC_INFO 0x0200001d +#define SN_SAL_CONSOLE_PUTC 0x02000021 +#define SN_SAL_CONSOLE_GETC 0x02000022 +#define SN_SAL_CONSOLE_PUTS 0x02000023 +#define SN_SAL_CONSOLE_GETS 0x02000024 +#define SN_SAL_CONSOLE_GETS_TIMEOUT 0x02000025 +#define SN_SAL_CONSOLE_POLL 0x02000026 +#define SN_SAL_CONSOLE_INTR 0x02000027 +#define SN_SAL_CONSOLE_PUTB 0x02000028 +#define SN_SAL_CONSOLE_XMIT_CHARS 0x0200002a +#define SN_SAL_CONSOLE_READC 0x0200002b +#define SN_SAL_SYSCTL_MODID_GET 0x02000031 +#define SN_SAL_SYSCTL_GET 0x02000032 +#define SN_SAL_SYSCTL_IOBRICK_MODULE_GET 0x02000033 +#define SN_SAL_SYSCTL_IO_PORTSPEED_GET 0x02000035 +#define SN_SAL_SYSCTL_SLAB_GET 0x02000036 +#define SN_SAL_BUS_CONFIG 0x02000037 +#define SN_SAL_SYS_SERIAL_GET 0x02000038 +#define SN_SAL_PARTITION_SERIAL_GET 0x02000039 +#define SN_SAL_SYSCTL_PARTITION_GET 0x0200003a +#define SN_SAL_SYSTEM_POWER_DOWN 0x0200003b +#define SN_SAL_GET_MASTER_BASEIO_NASID 0x0200003c +#define SN_SAL_COHERENCE 0x0200003d +#define SN_SAL_MEMPROTECT 0x0200003e +#define SN_SAL_SYSCTL_FRU_CAPTURE 0x0200003f + +#define SN_SAL_SYSCTL_IOBRICK_PCI_OP 0x02000042 // reentrant +#define SN_SAL_IROUTER_OP 0x02000043 +#define SN_SAL_IOIF_INTERRUPT 0x0200004a +#define SN_SAL_HWPERF_OP 0x02000050 // lock +#define SN_SAL_IOIF_ERROR_INTERRUPT 0x02000051 + +#define SN_SAL_IOIF_SLOT_ENABLE 0x02000053 +#define SN_SAL_IOIF_SLOT_DISABLE 0x02000054 +#define SN_SAL_IOIF_GET_HUBDEV_INFO 0x02000055 +#define SN_SAL_IOIF_GET_PCIBUS_INFO 0x02000056 +#define SN_SAL_IOIF_GET_PCIDEV_INFO 0x02000057 +#define 
SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST 0x02000058
+
+#define SN_SAL_HUB_ERROR_INTERRUPT 0x02000060
+
+
+/*
+ * Service-specific constants
+ */
+
+/* Console interrupt manipulation */
+ /* action codes */
+#define SAL_CONSOLE_INTR_OFF 0 /* turn the interrupt off */
+#define SAL_CONSOLE_INTR_ON 1 /* turn the interrupt on */
+#define SAL_CONSOLE_INTR_STATUS 2 /* retrieve the interrupt status */
+ /* interrupt specification & status return codes */
+#define SAL_CONSOLE_INTR_XMIT 1 /* output interrupt */
+#define SAL_CONSOLE_INTR_RECV 2 /* input interrupt */
+
+/* interrupt handling */
+#define SAL_INTR_ALLOC 1
+#define SAL_INTR_FREE 2
+
+/*
+ * IRouter (i.e. generalized system controller) operations
+ */
+#define SAL_IROUTER_OPEN 0 /* open a subchannel */
+#define SAL_IROUTER_CLOSE 1 /* close a subchannel */
+#define SAL_IROUTER_SEND 2 /* send part of an IRouter packet */
+#define SAL_IROUTER_RECV 3 /* receive part of an IRouter packet */
+#define SAL_IROUTER_INTR_STATUS 4 /* check the interrupt status for
+ * an open subchannel
+ */
+#define SAL_IROUTER_INTR_ON 5 /* enable an interrupt */
+#define SAL_IROUTER_INTR_OFF 6 /* disable an interrupt */
+#define SAL_IROUTER_INIT 7 /* initialize IRouter driver */
+
+/* IRouter interrupt mask bits */
+#define SAL_IROUTER_INTR_XMIT SAL_CONSOLE_INTR_XMIT
+#define SAL_IROUTER_INTR_RECV SAL_CONSOLE_INTR_RECV
+
+
+/*
+ * SAL Error Codes
+ */
+#define SALRET_MORE_PASSES 1
+#define SALRET_OK 0
+#define SALRET_NOT_IMPLEMENTED (-1)
+#define SALRET_INVALID_ARG (-2)
+#define SALRET_ERROR (-3)
+
+
+/**
+ * sn_sal_rev_major - get the major SGI SAL revision number
+ *
+ * The SGI PROM stores its version in sal_[ab]_rev_(major|minor).
+ * This routine simply extracts the major value from the
+ * @ia64_sal_systab structure constructed by ia64_sal_init().
+ */
+static inline int
+sn_sal_rev_major(void)
+{
+ struct ia64_sal_systab *systab = efi.sal_systab;
+
+ return (int)systab->sal_b_rev_major;
+}
+
+/**
+ * sn_sal_rev_minor - get the minor SGI SAL revision number
+ *
+ * The SGI PROM stores its version in sal_[ab]_rev_(major|minor).
+ * This routine simply extracts the minor value from the
+ * @ia64_sal_systab structure constructed by ia64_sal_init().
+ */
+static inline int
+sn_sal_rev_minor(void)
+{
+ struct ia64_sal_systab *systab = efi.sal_systab;
+
+ return (int)systab->sal_b_rev_minor;
+}
+
+/*
+ * Specify the minimum PROM revision required for this kernel.
+ * Note that they're stored in hex format...
+ */
+#define SN_SAL_MIN_MAJOR 0x4 /* SN2 kernels need at least PROM 4.0 */
+#define SN_SAL_MIN_MINOR 0x0
+
+/*
+ * Returns the master console nasid; if the call fails, returns an
+ * illegal value.
+ */
+static inline u64
+ia64_sn_get_console_nasid(void)
+{
+ struct ia64_sal_retval ret_stuff;
+
+ ret_stuff.status = 0;
+ ret_stuff.v0 = 0;
+ ret_stuff.v1 = 0;
+ ret_stuff.v2 = 0;
+ SAL_CALL(ret_stuff, SN_SAL_GET_MASTER_NASID, 0, 0, 0, 0, 0, 0, 0);
+
+ if (ret_stuff.status < 0)
+ return ret_stuff.status;
+
+ /* Master console nasid is in 'v0' */
+ return ret_stuff.v0;
+}
+
+/*
+ * Returns the master baseio nasid; if the call fails, returns an
+ * illegal value.
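+ *
+ * Caller sketch (illustrative; the SAL status, a negative value, is
+ * returned in place of a nasid on failure, so check for that first):
+ *
+ *     s64 nasid = (s64)ia64_sn_get_master_baseio_nasid();
+ *     if (nasid < 0)
+ *             ... the SAL call failed ...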
+ */ +static inline u64 +ia64_sn_get_master_baseio_nasid(void) +{ + struct ia64_sal_retval ret_stuff; + + ret_stuff.status = 0; + ret_stuff.v0 = 0; + ret_stuff.v1 = 0; + ret_stuff.v2 = 0; + SAL_CALL(ret_stuff, SN_SAL_GET_MASTER_BASEIO_NASID, 0, 0, 0, 0, 0, 0, 0); + + if (ret_stuff.status < 0) + return ret_stuff.status; + + /* Master baseio nasid is in 'v0' */ + return ret_stuff.v0; +} + +static inline char * +ia64_sn_get_klconfig_addr(nasid_t nasid) +{ + struct ia64_sal_retval ret_stuff; + int cnodeid; + + cnodeid = nasid_to_cnodeid(nasid); + ret_stuff.status = 0; + ret_stuff.v0 = 0; + ret_stuff.v1 = 0; + ret_stuff.v2 = 0; + SAL_CALL(ret_stuff, SN_SAL_GET_KLCONFIG_ADDR, (u64)nasid, 0, 0, 0, 0, 0, 0); + + /* + * We should panic if a valid cnode nasid does not produce + * a klconfig address. + */ + if (ret_stuff.status != 0) { + panic("ia64_sn_get_klconfig_addr: Returned error %lx\n", ret_stuff.status); + } + return ret_stuff.v0 ? __va(ret_stuff.v0) : NULL; +} + +/* + * Returns the next console character. + */ +static inline u64 +ia64_sn_console_getc(int *ch) +{ + struct ia64_sal_retval ret_stuff; + + ret_stuff.status = 0; + ret_stuff.v0 = 0; + ret_stuff.v1 = 0; + ret_stuff.v2 = 0; + SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_GETC, 0, 0, 0, 0, 0, 0, 0); + + /* character is in 'v0' */ + *ch = (int)ret_stuff.v0; + + return ret_stuff.status; +} + +/* + * Read a character from the SAL console device, after a previous interrupt + * or poll operation has given us to know that a character is available + * to be read. + */ +static inline u64 +ia64_sn_console_readc(void) +{ + struct ia64_sal_retval ret_stuff; + + ret_stuff.status = 0; + ret_stuff.v0 = 0; + ret_stuff.v1 = 0; + ret_stuff.v2 = 0; + SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_READC, 0, 0, 0, 0, 0, 0, 0); + + /* character is in 'v0' */ + return ret_stuff.v0; +} + +/* + * Sends the given character to the console. + */ +static inline u64 +ia64_sn_console_putc(char ch) +{ + struct ia64_sal_retval ret_stuff; + + ret_stuff.status = 0; + ret_stuff.v0 = 0; + ret_stuff.v1 = 0; + ret_stuff.v2 = 0; + SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_PUTC, (uint64_t)ch, 0, 0, 0, 0, 0, 0); + + return ret_stuff.status; +} + +/* + * Sends the given buffer to the console. + */ +static inline u64 +ia64_sn_console_putb(const char *buf, int len) +{ + struct ia64_sal_retval ret_stuff; + + ret_stuff.status = 0; + ret_stuff.v0 = 0; + ret_stuff.v1 = 0; + ret_stuff.v2 = 0; + SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_PUTB, (uint64_t)buf, (uint64_t)len, 0, 0, 0, 0, 0); + + if ( ret_stuff.status == 0 ) { + return ret_stuff.v0; + } + return (u64)0; +} + +/* + * Print a platform error record + */ +static inline u64 +ia64_sn_plat_specific_err_print(int (*hook)(const char*, ...), char *rec) +{ + struct ia64_sal_retval ret_stuff; + + ret_stuff.status = 0; + ret_stuff.v0 = 0; + ret_stuff.v1 = 0; + ret_stuff.v2 = 0; + SAL_CALL_REENTRANT(ret_stuff, SN_SAL_PRINT_ERROR, (uint64_t)hook, (uint64_t)rec, 0, 0, 0, 0, 0); + + return ret_stuff.status; +} + +/* + * Check for Platform errors + */ +static inline u64 +ia64_sn_plat_cpei_handler(void) +{ + struct ia64_sal_retval ret_stuff; + + ret_stuff.status = 0; + ret_stuff.v0 = 0; + ret_stuff.v1 = 0; + ret_stuff.v2 = 0; + SAL_CALL_NOLOCK(ret_stuff, SN_SAL_LOG_CE, 0, 0, 0, 0, 0, 0, 0); + + return ret_stuff.status; +} + +/* + * Checks for console input. 
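+ *
+ * A polling sketch (illustrative only):
+ *
+ *     int avail, ch;
+ *     if (ia64_sn_console_check(&avail) == 0 && avail)
+ *             ch = (int)ia64_sn_console_readc();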
+ */ +static inline u64 +ia64_sn_console_check(int *result) +{ + struct ia64_sal_retval ret_stuff; + + ret_stuff.status = 0; + ret_stuff.v0 = 0; + ret_stuff.v1 = 0; + ret_stuff.v2 = 0; + SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_POLL, 0, 0, 0, 0, 0, 0, 0); + + /* result is in 'v0' */ + *result = (int)ret_stuff.v0; + + return ret_stuff.status; +} + +/* + * Checks console interrupt status + */ +static inline u64 +ia64_sn_console_intr_status(void) +{ + struct ia64_sal_retval ret_stuff; + + ret_stuff.status = 0; + ret_stuff.v0 = 0; + ret_stuff.v1 = 0; + ret_stuff.v2 = 0; + SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_INTR, + 0, SAL_CONSOLE_INTR_STATUS, + 0, 0, 0, 0, 0); + + if (ret_stuff.status == 0) { + return ret_stuff.v0; + } + + return 0; +} + +/* + * Enable an interrupt on the SAL console device. + */ +static inline void +ia64_sn_console_intr_enable(uint64_t intr) +{ + struct ia64_sal_retval ret_stuff; + + ret_stuff.status = 0; + ret_stuff.v0 = 0; + ret_stuff.v1 = 0; + ret_stuff.v2 = 0; + SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_INTR, + intr, SAL_CONSOLE_INTR_ON, + 0, 0, 0, 0, 0); +} + +/* + * Disable an interrupt on the SAL console device. + */ +static inline void +ia64_sn_console_intr_disable(uint64_t intr) +{ + struct ia64_sal_retval ret_stuff; + + ret_stuff.status = 0; + ret_stuff.v0 = 0; + ret_stuff.v1 = 0; + ret_stuff.v2 = 0; + SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_INTR, + intr, SAL_CONSOLE_INTR_OFF, + 0, 0, 0, 0, 0); +} + +/* + * Sends a character buffer to the console asynchronously. + */ +static inline u64 +ia64_sn_console_xmit_chars(char *buf, int len) +{ + struct ia64_sal_retval ret_stuff; + + ret_stuff.status = 0; + ret_stuff.v0 = 0; + ret_stuff.v1 = 0; + ret_stuff.v2 = 0; + SAL_CALL_NOLOCK(ret_stuff, SN_SAL_CONSOLE_XMIT_CHARS, + (uint64_t)buf, (uint64_t)len, + 0, 0, 0, 0, 0); + + if (ret_stuff.status == 0) { + return ret_stuff.v0; + } + + return 0; +} + +/* + * Returns the iobrick module Id + */ +static inline u64 +ia64_sn_sysctl_iobrick_module_get(nasid_t nasid, int *result) +{ + struct ia64_sal_retval ret_stuff; + + ret_stuff.status = 0; + ret_stuff.v0 = 0; + ret_stuff.v1 = 0; + ret_stuff.v2 = 0; + SAL_CALL_NOLOCK(ret_stuff, SN_SAL_SYSCTL_IOBRICK_MODULE_GET, nasid, 0, 0, 0, 0, 0, 0); + + /* result is in 'v0' */ + *result = (int)ret_stuff.v0; + + return ret_stuff.status; +} + +/** + * ia64_sn_pod_mode - call the SN_SAL_POD_MODE function + * + * SN_SAL_POD_MODE actually takes an argument, but it's always + * 0 when we call it from the kernel, so we don't have to expose + * it to the caller. + */ +static inline u64 +ia64_sn_pod_mode(void) +{ + struct ia64_sal_retval isrv; + SAL_CALL(isrv, SN_SAL_POD_MODE, 0, 0, 0, 0, 0, 0, 0); + if (isrv.status) + return 0; + return isrv.v0; +} + +/** + * ia64_sn_probe_mem - read from memory safely + * @addr: address to probe + * @size: number bytes to read (1,2,4,8) + * @data_ptr: address to store value read by probe (-1 returned if probe fails) + * + * Call into the SAL to do a memory read. If the read generates a machine + * check, this routine will recover gracefully and return -1 to the caller. + * @addr is usually a kernel virtual address in uncached space (i.e. the + * address starts with 0xc), but if called in physical mode, @addr should + * be a physical address. 
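+ *
+ * A minimal caller sketch (illustrative; 'addr' is assumed to be an
+ * uncached kernel virtual address):
+ *
+ *     u32 val;
+ *     if (ia64_sn_probe_mem(addr, 4, &val) == 0)
+ *             ... val holds the data read ...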
+ * + * Return values: + * 0 - probe successful + * 1 - probe failed (generated MCA) + * 2 - Bad arg + * <0 - PAL error + */ +static inline u64 +ia64_sn_probe_mem(long addr, long size, void *data_ptr) +{ + struct ia64_sal_retval isrv; + + SAL_CALL(isrv, SN_SAL_PROBE, addr, size, 0, 0, 0, 0, 0); + + if (data_ptr) { + switch (size) { + case 1: + *((u8*)data_ptr) = (u8)isrv.v0; + break; + case 2: + *((u16*)data_ptr) = (u16)isrv.v0; + break; + case 4: + *((u32*)data_ptr) = (u32)isrv.v0; + break; + case 8: + *((u64*)data_ptr) = (u64)isrv.v0; + break; + default: + isrv.status = 2; + } + } + return isrv.status; +} + +/* + * Retrieve the system serial number as an ASCII string. + */ +static inline u64 +ia64_sn_sys_serial_get(char *buf) +{ + struct ia64_sal_retval ret_stuff; + SAL_CALL_NOLOCK(ret_stuff, SN_SAL_SYS_SERIAL_GET, buf, 0, 0, 0, 0, 0, 0); + return ret_stuff.status; +} + +extern char sn_system_serial_number_string[]; +extern u64 sn_partition_serial_number; + +static inline char * +sn_system_serial_number(void) { + if (sn_system_serial_number_string[0]) { + return(sn_system_serial_number_string); + } else { + ia64_sn_sys_serial_get(sn_system_serial_number_string); + return(sn_system_serial_number_string); + } +} + + +/* + * Returns a unique id number for this system and partition (suitable for + * use with license managers), based in part on the system serial number. + */ +static inline u64 +ia64_sn_partition_serial_get(void) +{ + struct ia64_sal_retval ret_stuff; + SAL_CALL(ret_stuff, SN_SAL_PARTITION_SERIAL_GET, 0, 0, 0, 0, 0, 0, 0); + if (ret_stuff.status != 0) + return 0; + return ret_stuff.v0; +} + +static inline u64 +sn_partition_serial_number_val(void) { + if (sn_partition_serial_number) { + return(sn_partition_serial_number); + } else { + return(sn_partition_serial_number = ia64_sn_partition_serial_get()); + } +} + +/* + * Returns the partition id of the nasid passed in as an argument, + * or INVALID_PARTID if the partition id cannot be retrieved. + */ +static inline partid_t +ia64_sn_sysctl_partition_get(nasid_t nasid) +{ + struct ia64_sal_retval ret_stuff; + SAL_CALL(ret_stuff, SN_SAL_SYSCTL_PARTITION_GET, nasid, + 0, 0, 0, 0, 0, 0); + if (ret_stuff.status != 0) + return INVALID_PARTID; + return ((partid_t)ret_stuff.v0); +} + +/* + * Returns the partition id of the current processor. + */ + +extern partid_t sn_partid; + +static inline partid_t +sn_local_partid(void) { + if (sn_partid < 0) { + return (sn_partid = ia64_sn_sysctl_partition_get(cpuid_to_nasid(smp_processor_id()))); + } else { + return sn_partid; + } +} + +/* + * Register or unregister a physical address range being referenced across + * a partition boundary for which certain SAL errors should be scanned for, + * cleaned up and ignored. This is of value for kernel partitioning code only. + * Values for the operation argument: + * 1 = register this address range with SAL + * 0 = unregister this address range with SAL + * + * SAL maintains a reference count on an address range in case it is registered + * multiple times. + * + * On success, returns the reference count of the address range after the SAL + * call has performed the current registration/unregistration. Returns a + * negative value if an error occurred. 
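+ *
+ * Registration sketch (illustrative) for one page shared across a
+ * partition boundary:
+ *
+ *     if (sn_register_xp_addr_region(paddr, PAGE_SIZE, 1) < 0)
+ *             ... handle the SAL error ...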
+ */ +static inline int +sn_register_xp_addr_region(u64 paddr, u64 len, int operation) +{ + struct ia64_sal_retval ret_stuff; + SAL_CALL(ret_stuff, SN_SAL_XP_ADDR_REGION, paddr, len, (u64)operation, + 0, 0, 0, 0); + return ret_stuff.status; +} + +/* + * Register or unregister an instruction range for which SAL errors should + * be ignored. If an error occurs while in the registered range, SAL jumps + * to return_addr after ignoring the error. Values for the operation argument: + * 1 = register this instruction range with SAL + * 0 = unregister this instruction range with SAL + * + * Returns 0 on success, or a negative value if an error occurred. + */ +static inline int +sn_register_nofault_code(u64 start_addr, u64 end_addr, u64 return_addr, + int virtual, int operation) +{ + struct ia64_sal_retval ret_stuff; + u64 call; + if (virtual) { + call = SN_SAL_NO_FAULT_ZONE_VIRTUAL; + } else { + call = SN_SAL_NO_FAULT_ZONE_PHYSICAL; + } + SAL_CALL(ret_stuff, call, start_addr, end_addr, return_addr, (u64)1, + 0, 0, 0); + return ret_stuff.status; +} + +/* + * Change or query the coherence domain for this partition. Each cpu-based + * nasid is represented by a bit in an array of 64-bit words: + * 0 = not in this partition's coherency domain + * 1 = in this partition's coherency domain + * + * It is not possible for the local system's nasids to be removed from + * the coherency domain. Purpose of the domain arguments: + * new_domain = set the coherence domain to the given nasids + * old_domain = return the current coherence domain + * + * Returns 0 on success, or a negative value if an error occurred. + */ +static inline int +sn_change_coherence(u64 *new_domain, u64 *old_domain) +{ + struct ia64_sal_retval ret_stuff; + SAL_CALL(ret_stuff, SN_SAL_COHERENCE, new_domain, old_domain, 0, 0, + 0, 0, 0); + return ret_stuff.status; +} + +/* + * Change memory access protections for a physical address range. + * nasid_array is not used on Altix, but may be in future architectures. + * Available memory protection access classes are defined after the function. + */ +static inline int +sn_change_memprotect(u64 paddr, u64 len, u64 perms, u64 *nasid_array) +{ + struct ia64_sal_retval ret_stuff; + int cnodeid; + unsigned long irq_flags; + + cnodeid = nasid_to_cnodeid(get_node_number(paddr)); + // spin_lock(&NODEPDA(cnodeid)->bist_lock); + local_irq_save(irq_flags); + SAL_CALL_NOLOCK(ret_stuff, SN_SAL_MEMPROTECT, paddr, len, nasid_array, + perms, 0, 0, 0); + local_irq_restore(irq_flags); + // spin_unlock(&NODEPDA(cnodeid)->bist_lock); + return ret_stuff.status; +} +#define SN_MEMPROT_ACCESS_CLASS_0 0x14a080 +#define SN_MEMPROT_ACCESS_CLASS_1 0x2520c2 +#define SN_MEMPROT_ACCESS_CLASS_2 0x14a1ca +#define SN_MEMPROT_ACCESS_CLASS_3 0x14a290 +#define SN_MEMPROT_ACCESS_CLASS_6 0x084080 +#define SN_MEMPROT_ACCESS_CLASS_7 0x021080 + +/* + * Turns off system power. + */ +static inline void +ia64_sn_power_down(void) +{ + struct ia64_sal_retval ret_stuff; + SAL_CALL(ret_stuff, SN_SAL_SYSTEM_POWER_DOWN, 0, 0, 0, 0, 0, 0, 0); + while(1); + /* never returns */ +} + +/** + * ia64_sn_fru_capture - tell the system controller to capture hw state + * + * This routine will call the SAL which will tell the system controller(s) + * to capture hw mmr information from each SHub in the system. 
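+ *
+ * Usage is a single fire-and-forget call (sketch; note the routine
+ * below folds a SAL failure into a return value of 0):
+ *
+ *     (void)ia64_sn_fru_capture();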
+ */
+static inline u64
+ia64_sn_fru_capture(void)
+{
+ struct ia64_sal_retval isrv;
+ SAL_CALL(isrv, SN_SAL_SYSCTL_FRU_CAPTURE, 0, 0, 0, 0, 0, 0, 0);
+ if (isrv.status)
+ return 0;
+ return isrv.v0;
+}
+
+/*
+ * Performs an operation on a PCI bus or slot -- power up, power down
+ * or reset.
+ */
+static inline u64
+ia64_sn_sysctl_iobrick_pci_op(nasid_t n, u64 connection_type,
+ u64 bus, char slot,
+ u64 action)
+{
+ struct ia64_sal_retval rv = {0, 0, 0, 0};
+
+ SAL_CALL_NOLOCK(rv, SN_SAL_SYSCTL_IOBRICK_PCI_OP, connection_type, n, action,
+ bus, (u64) slot, 0, 0);
+ if (rv.status)
+ return rv.v0;
+ return 0;
+}
+
+
+/*
+ * Open a subchannel for sending arbitrary data to the system
+ * controller network via the system controller device associated with
+ * 'nasid'. Return the subchannel number or a negative error code.
+ */
+static inline int
+ia64_sn_irtr_open(nasid_t nasid)
+{
+ struct ia64_sal_retval rv;
+ SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_OPEN, nasid,
+ 0, 0, 0, 0, 0);
+ return (int) rv.v0;
+}
+
+/*
+ * Close system controller subchannel 'subch' previously opened on 'nasid'.
+ */
+static inline int
+ia64_sn_irtr_close(nasid_t nasid, int subch)
+{
+ struct ia64_sal_retval rv;
+ SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_CLOSE,
+ (u64) nasid, (u64) subch, 0, 0, 0, 0);
+ return (int) rv.status;
+}
+
+/*
+ * Read data from system controller associated with 'nasid' on
+ * subchannel 'subch'. The buffer to be filled is pointed to by
+ * 'buf', and its capacity is in the integer pointed to by 'len'. The
+ * referent of 'len' is set to the number of bytes read by the SAL
+ * call. The return value is either SALRET_OK (for bytes read) or
+ * SALRET_ERROR (for error or "no data available").
+ */
+static inline int
+ia64_sn_irtr_recv(nasid_t nasid, int subch, char *buf, int *len)
+{
+ struct ia64_sal_retval rv;
+ SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_RECV,
+ (u64) nasid, (u64) subch, (u64) buf, (u64) len,
+ 0, 0);
+ return (int) rv.status;
+}
+
+/*
+ * Write data to the system controller network via the system
+ * controller associated with 'nasid' on subchannel 'subch'. The
+ * buffer to be written out is pointed to by 'buf', and 'len' is the
+ * number of bytes to be written. The return value is either the
+ * number of bytes written (which could be zero) or a negative error
+ * code.
+ */
+static inline int
+ia64_sn_irtr_send(nasid_t nasid, int subch, char *buf, int len)
+{
+ struct ia64_sal_retval rv;
+ SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_SEND,
+ (u64) nasid, (u64) subch, (u64) buf, (u64) len,
+ 0, 0);
+ return (int) rv.v0;
+}
+
+/*
+ * Check whether any interrupts are pending for the system controller
+ * associated with 'nasid' and its subchannel 'subch'. The return
+ * value is a mask of pending interrupts (SAL_IROUTER_INTR_XMIT and/or
+ * SAL_IROUTER_INTR_RECV).
+ */
+static inline int
+ia64_sn_irtr_intr(nasid_t nasid, int subch)
+{
+ struct ia64_sal_retval rv;
+ SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_INTR_STATUS,
+ (u64) nasid, (u64) subch, 0, 0, 0, 0);
+ return (int) rv.v0;
+}
+
+/*
+ * Enable the interrupt indicated by the intr parameter (either
+ * SAL_IROUTER_INTR_XMIT or SAL_IROUTER_INTR_RECV).
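+ *
+ * A subchannel lifecycle sketch (illustrative only), tying together
+ * the routines defined above:
+ *
+ *     int subch = ia64_sn_irtr_open(nasid);
+ *     if (subch >= 0) {
+ *             ia64_sn_irtr_intr_enable(nasid, subch, SAL_IROUTER_INTR_RECV);
+ *             ...
+ *             ia64_sn_irtr_close(nasid, subch);
+ *     }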
+ */ +static inline int +ia64_sn_irtr_intr_enable(nasid_t nasid, int subch, u64 intr) +{ + struct ia64_sal_retval rv; + SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_INTR_ON, + (u64) nasid, (u64) subch, intr, 0, 0, 0); + return (int) rv.v0; +} + +/* + * Disable the interrupt indicated by the intr parameter (either + * SAL_IROUTER_INTR_XMIT or SAL_IROUTER_INTR_RECV). + */ +static inline int +ia64_sn_irtr_intr_disable(nasid_t nasid, int subch, u64 intr) +{ + struct ia64_sal_retval rv; + SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_INTR_OFF, + (u64) nasid, (u64) subch, intr, 0, 0, 0); + return (int) rv.v0; +} + +/** + * ia64_sn_get_fit_compt - read a FIT entry from the PROM header + * @nasid: NASID of node to read + * @index: FIT entry index to be retrieved (0..n) + * @fitentry: 16 byte buffer where FIT entry will be stored. + * @banbuf: optional buffer for retrieving banner + * @banlen: length of banner buffer + * + * Access to the physical PROM chips needs to be serialized since reads and + * writes can't occur at the same time, so we need to call into the SAL when + * we want to look at the FIT entries on the chips. + * + * Returns: + * %SALRET_OK if ok + * %SALRET_INVALID_ARG if index too big + * %SALRET_NOT_IMPLEMENTED if running on older PROM + * ??? if nasid invalid OR banner buffer not large enough + */ +static inline int +ia64_sn_get_fit_compt(u64 nasid, u64 index, void *fitentry, void *banbuf, + u64 banlen) +{ + struct ia64_sal_retval rv; + SAL_CALL_NOLOCK(rv, SN_SAL_GET_FIT_COMPT, nasid, index, fitentry, + banbuf, banlen, 0, 0); + return (int) rv.status; +} + +/* + * Initialize the SAL components of the system controller + * communication driver; specifically pass in a sizable buffer that + * can be used for allocation of subchannel queues as new subchannels + * are opened. "buf" points to the buffer, and "len" specifies its + * length. + */ +static inline int +ia64_sn_irtr_init(nasid_t nasid, void *buf, int len) +{ + struct ia64_sal_retval rv; + SAL_CALL_REENTRANT(rv, SN_SAL_IROUTER_OP, SAL_IROUTER_INIT, + (u64) nasid, (u64) buf, (u64) len, 0, 0, 0); + return (int) rv.status; +} + +/* + * Returns the nasid, subnode & slice corresponding to a SAPIC ID + * + * In: + * arg0 - SN_SAL_GET_SAPIC_INFO + * arg1 - sapicid (lid >> 16) + * Out: + * v0 - nasid + * v1 - subnode + * v2 - slice + */ +static inline u64 +ia64_sn_get_sapic_info(int sapicid, int *nasid, int *subnode, int *slice) +{ + struct ia64_sal_retval ret_stuff; + + ret_stuff.status = 0; + ret_stuff.v0 = 0; + ret_stuff.v1 = 0; + ret_stuff.v2 = 0; + SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_SAPIC_INFO, sapicid, 0, 0, 0, 0, 0, 0); + +/***** BEGIN HACK - temp til old proms no longer supported ********/ + if (ret_stuff.status == SALRET_NOT_IMPLEMENTED) { + if (nasid) *nasid = sapicid & 0xfff; + if (subnode) *subnode = (sapicid >> 13) & 1; + if (slice) *slice = (sapicid >> 12) & 3; + return 0; + } +/***** END HACK *******/ + + if (ret_stuff.status < 0) + return ret_stuff.status; + + if (nasid) *nasid = (int) ret_stuff.v0; + if (subnode) *subnode = (int) ret_stuff.v1; + if (slice) *slice = (int) ret_stuff.v2; + return 0; +} + +/* + * Returns information about the HUB/SHUB. 
+ * In: + * arg0 - SN_SAL_GET_SN_INFO + * arg1 - 0 (other values reserved for future use) + * Out: + * v0 + * [7:0] - shub type (0=shub1, 1=shub2) + * [15:8] - Log2 max number of nodes in entire system (includes + * C-bricks, I-bricks, etc) + * [23:16] - Log2 of nodes per sharing domain + * [31:24] - partition ID + * [39:32] - coherency_id + * [47:40] - regionsize + * v1 + * [15:0] - nasid mask (ex., 0x7ff for 11 bit nasid) + * [23:15] - bit position of low nasid bit + */ +static inline u64 +ia64_sn_get_sn_info(int fc, u8 *shubtype, u16 *nasid_bitmask, u8 *nasid_shift, + u8 *systemsize, u8 *sharing_domain_size, u8 *partid, u8 *coher, u8 *reg) +{ + struct ia64_sal_retval ret_stuff; + + ret_stuff.status = 0; + ret_stuff.v0 = 0; + ret_stuff.v1 = 0; + ret_stuff.v2 = 0; + SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_SN_INFO, fc, 0, 0, 0, 0, 0, 0); + +/***** BEGIN HACK - temp til old proms no longer supported ********/ + if (ret_stuff.status == SALRET_NOT_IMPLEMENTED) { + int nasid = get_sapicid() & 0xfff;; +#define SH_SHUB_ID_NODES_PER_BIT_MASK 0x001f000000000000UL +#define SH_SHUB_ID_NODES_PER_BIT_SHFT 48 + if (shubtype) *shubtype = 0; + if (nasid_bitmask) *nasid_bitmask = 0x7ff; + if (nasid_shift) *nasid_shift = 38; + if (systemsize) *systemsize = 11; + if (sharing_domain_size) *sharing_domain_size = 9; + if (partid) *partid = ia64_sn_sysctl_partition_get(nasid); + if (coher) *coher = nasid >> 9; + if (reg) *reg = (HUB_L((u64 *) LOCAL_MMR_ADDR(SH1_SHUB_ID)) & SH_SHUB_ID_NODES_PER_BIT_MASK) >> + SH_SHUB_ID_NODES_PER_BIT_SHFT; + return 0; + } +/***** END HACK *******/ + + if (ret_stuff.status < 0) + return ret_stuff.status; + + if (shubtype) *shubtype = ret_stuff.v0 & 0xff; + if (systemsize) *systemsize = (ret_stuff.v0 >> 8) & 0xff; + if (sharing_domain_size) *sharing_domain_size = (ret_stuff.v0 >> 16) & 0xff; + if (partid) *partid = (ret_stuff.v0 >> 24) & 0xff; + if (coher) *coher = (ret_stuff.v0 >> 32) & 0xff; + if (reg) *reg = (ret_stuff.v0 >> 40) & 0xff; + if (nasid_bitmask) *nasid_bitmask = (ret_stuff.v1 & 0xffff); + if (nasid_shift) *nasid_shift = (ret_stuff.v1 >> 16) & 0xff; + return 0; +} + +/* + * This is the access point to the Altix PROM hardware performance + * and status monitoring interface. For info on using this, see + * include/asm-ia64/sn/sn2/sn_hwperf.h + */ +static inline int +ia64_sn_hwperf_op(nasid_t nasid, u64 opcode, u64 a0, u64 a1, u64 a2, + u64 a3, u64 a4, int *v0) +{ + struct ia64_sal_retval rv; + SAL_CALL_NOLOCK(rv, SN_SAL_HWPERF_OP, (u64)nasid, + opcode, a0, a1, a2, a3, a4); + if (v0) + *v0 = (int) rv.v0; + return (int) rv.status; +} + +#endif /* _ASM_IA64_SN_SN_SAL_H */ diff --git a/include/asm-ia64/sn/sndrv.h b/include/asm-ia64/sn/sndrv.h new file mode 100644 index 000000000000..aa00d42cde32 --- /dev/null +++ b/include/asm-ia64/sn/sndrv.h @@ -0,0 +1,47 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2002-2004 Silicon Graphics, Inc. All Rights Reserved. 
+ */ + +#ifndef _ASM_IA64_SN_SNDRV_H +#define _ASM_IA64_SN_SNDRV_H + +/* ioctl commands */ +#define SNDRV_GET_ROUTERINFO 1 +#define SNDRV_GET_INFOSIZE 2 +#define SNDRV_GET_HUBINFO 3 +#define SNDRV_GET_FLASHLOGSIZE 4 +#define SNDRV_SET_FLASHSYNC 5 +#define SNDRV_GET_FLASHLOGDATA 6 +#define SNDRV_GET_FLASHLOGALL 7 + +#define SNDRV_SET_HISTOGRAM_TYPE 14 + +#define SNDRV_ELSC_COMMAND 19 +#define SNDRV_CLEAR_LOG 20 +#define SNDRV_INIT_LOG 21 +#define SNDRV_GET_PIMM_PSC 22 +#define SNDRV_SET_PARTITION 23 +#define SNDRV_GET_PARTITION 24 + +/* see synergy_perf_ioctl() */ +#define SNDRV_GET_SYNERGY_VERSION 30 +#define SNDRV_GET_SYNERGY_STATUS 31 +#define SNDRV_GET_SYNERGYINFO 32 +#define SNDRV_SYNERGY_APPEND 33 +#define SNDRV_SYNERGY_ENABLE 34 +#define SNDRV_SYNERGY_FREQ 35 + +/* Devices */ +#define SNDRV_UKNOWN_DEVICE -1 +#define SNDRV_ROUTER_DEVICE 1 +#define SNDRV_HUB_DEVICE 2 +#define SNDRV_ELSC_NVRAM_DEVICE 3 +#define SNDRV_ELSC_CONTROLLER_DEVICE 4 +#define SNDRV_SYSCTL_SUBCH 5 +#define SNDRV_SYNERGY_DEVICE 6 + +#endif /* _ASM_IA64_SN_SNDRV_H */ diff --git a/include/asm-ia64/sn/types.h b/include/asm-ia64/sn/types.h new file mode 100644 index 000000000000..586ed47cae9c --- /dev/null +++ b/include/asm-ia64/sn/types.h @@ -0,0 +1,25 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1999,2001-2003 Silicon Graphics, Inc. All Rights Reserved. + * Copyright (C) 1999 by Ralf Baechle + */ +#ifndef _ASM_IA64_SN_TYPES_H +#define _ASM_IA64_SN_TYPES_H + +#include <linux/types.h> + +typedef unsigned long cpuid_t; +typedef signed short nasid_t; /* node id in numa-as-id space */ +typedef signed char partid_t; /* partition ID type */ +typedef unsigned int moduleid_t; /* user-visible module number type */ +typedef unsigned int cmoduleid_t; /* kernel compact module id type */ +typedef signed char slabid_t; +typedef u64 nic_t; +typedef unsigned long iopaddr_t; +typedef unsigned long paddr_t; +typedef short cnodeid_t; + +#endif /* _ASM_IA64_SN_TYPES_H */ diff --git a/include/asm-ia64/socket.h b/include/asm-ia64/socket.h new file mode 100644 index 000000000000..21a9f10d6baa --- /dev/null +++ b/include/asm-ia64/socket.h @@ -0,0 +1,59 @@ +#ifndef _ASM_IA64_SOCKET_H +#define _ASM_IA64_SOCKET_H + +/* + * Socket related defines. + * + * Based on <asm-i386/socket.h>. 
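+ *
+ * The SO_* values below are the option names user code passes to
+ * setsockopt(2)/getsockopt(2) at the SOL_SOCKET level, e.g. this
+ * user-space sketch:
+ *
+ *     int one = 1;
+ *     setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));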
+ * + * Modified 1998-2000 + * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co + */ + +#include <asm/sockios.h> + +/* For setsockopt(2) */ +#define SOL_SOCKET 1 + +#define SO_DEBUG 1 +#define SO_REUSEADDR 2 +#define SO_TYPE 3 +#define SO_ERROR 4 +#define SO_DONTROUTE 5 +#define SO_BROADCAST 6 +#define SO_SNDBUF 7 +#define SO_RCVBUF 8 +#define SO_KEEPALIVE 9 +#define SO_OOBINLINE 10 +#define SO_NO_CHECK 11 +#define SO_PRIORITY 12 +#define SO_LINGER 13 +#define SO_BSDCOMPAT 14 +/* To add :#define SO_REUSEPORT 15 */ +#define SO_PASSCRED 16 +#define SO_PEERCRED 17 +#define SO_RCVLOWAT 18 +#define SO_SNDLOWAT 19 +#define SO_RCVTIMEO 20 +#define SO_SNDTIMEO 21 + +/* Security levels - as per NRL IPv6 - don't actually do anything */ +#define SO_SECURITY_AUTHENTICATION 22 +#define SO_SECURITY_ENCRYPTION_TRANSPORT 23 +#define SO_SECURITY_ENCRYPTION_NETWORK 24 + +#define SO_BINDTODEVICE 25 + +/* Socket filtering */ +#define SO_ATTACH_FILTER 26 +#define SO_DETACH_FILTER 27 + +#define SO_PEERNAME 28 +#define SO_TIMESTAMP 29 +#define SCM_TIMESTAMP SO_TIMESTAMP + +#define SO_ACCEPTCONN 30 + +#define SO_PEERSEC 31 + +#endif /* _ASM_IA64_SOCKET_H */ diff --git a/include/asm-ia64/sockios.h b/include/asm-ia64/sockios.h new file mode 100644 index 000000000000..cf94857c8a54 --- /dev/null +++ b/include/asm-ia64/sockios.h @@ -0,0 +1,19 @@ +#ifndef _ASM_IA64_SOCKIOS_H +#define _ASM_IA64_SOCKIOS_H + +/* + * Socket-level I/O control calls. + * + * Based on <asm-i386/sockios.h>. + * + * Modified 1998, 1999 + * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co + */ +#define FIOSETOWN 0x8901 +#define SIOCSPGRP 0x8902 +#define FIOGETOWN 0x8903 +#define SIOCGPGRP 0x8904 +#define SIOCATMARK 0x8905 +#define SIOCGSTAMP 0x8906 /* Get stamp */ + +#endif /* _ASM_IA64_SOCKIOS_H */ diff --git a/include/asm-ia64/spinlock.h b/include/asm-ia64/spinlock.h new file mode 100644 index 000000000000..909936f25512 --- /dev/null +++ b/include/asm-ia64/spinlock.h @@ -0,0 +1,208 @@ +#ifndef _ASM_IA64_SPINLOCK_H +#define _ASM_IA64_SPINLOCK_H + +/* + * Copyright (C) 1998-2003 Hewlett-Packard Co + * David Mosberger-Tang <davidm@hpl.hp.com> + * Copyright (C) 1999 Walt Drummond <drummond@valinux.com> + * + * This file is used for SMP configurations only. + */ + +#include <linux/compiler.h> +#include <linux/kernel.h> + +#include <asm/atomic.h> +#include <asm/bitops.h> +#include <asm/intrinsics.h> +#include <asm/system.h> + +typedef struct { + volatile unsigned int lock; +#ifdef CONFIG_PREEMPT + unsigned int break_lock; +#endif +} spinlock_t; + +#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 } +#define spin_lock_init(x) ((x)->lock = 0) + +#ifdef ASM_SUPPORTED +/* + * Try to get the lock. If we fail to get the lock, make a non-standard call to + * ia64_spinlock_contention(). We do not use a normal call because that would force all + * callers of spin_lock() to be non-leaf routines. Instead, ia64_spinlock_contention() is + * carefully coded to touch only those registers that spin_lock() marks "clobbered". + */ + +#define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory" + +static inline void +_raw_spin_lock_flags (spinlock_t *lock, unsigned long flags) +{ + register volatile unsigned int *ptr asm ("r31") = &lock->lock; + +#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3) +# ifdef CONFIG_ITANIUM + /* don't use brl on Itanium... 
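+	   (so the sequence below loads the address of
+	   ia64_spinlock_contention_pre3_4 into r29 with movl and branches
+	   through b6 instead of using a long branch)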
*/ + asm volatile ("{\n\t" + " mov ar.ccv = r0\n\t" + " mov r28 = ip\n\t" + " mov r30 = 1;;\n\t" + "}\n\t" + "cmpxchg4.acq r30 = [%1], r30, ar.ccv\n\t" + "movl r29 = ia64_spinlock_contention_pre3_4;;\n\t" + "cmp4.ne p14, p0 = r30, r0\n\t" + "mov b6 = r29;;\n\t" + "mov r27=%2\n\t" + "(p14) br.cond.spnt.many b6" + : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS); +# else + asm volatile ("{\n\t" + " mov ar.ccv = r0\n\t" + " mov r28 = ip\n\t" + " mov r30 = 1;;\n\t" + "}\n\t" + "cmpxchg4.acq r30 = [%1], r30, ar.ccv;;\n\t" + "cmp4.ne p14, p0 = r30, r0\n\t" + "mov r27=%2\n\t" + "(p14) brl.cond.spnt.many ia64_spinlock_contention_pre3_4;;" + : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS); +# endif /* CONFIG_MCKINLEY */ +#else +# ifdef CONFIG_ITANIUM + /* don't use brl on Itanium... */ + /* mis-declare, so we get the entry-point, not it's function descriptor: */ + asm volatile ("mov r30 = 1\n\t" + "mov r27=%2\n\t" + "mov ar.ccv = r0;;\n\t" + "cmpxchg4.acq r30 = [%0], r30, ar.ccv\n\t" + "movl r29 = ia64_spinlock_contention;;\n\t" + "cmp4.ne p14, p0 = r30, r0\n\t" + "mov b6 = r29;;\n\t" + "(p14) br.call.spnt.many b6 = b6" + : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS); +# else + asm volatile ("mov r30 = 1\n\t" + "mov r27=%2\n\t" + "mov ar.ccv = r0;;\n\t" + "cmpxchg4.acq r30 = [%0], r30, ar.ccv;;\n\t" + "cmp4.ne p14, p0 = r30, r0\n\t" + "(p14) brl.call.spnt.many b6=ia64_spinlock_contention;;" + : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS); +# endif /* CONFIG_MCKINLEY */ +#endif +} +#define _raw_spin_lock(lock) _raw_spin_lock_flags(lock, 0) +#else /* !ASM_SUPPORTED */ +#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) +# define _raw_spin_lock(x) \ +do { \ + __u32 *ia64_spinlock_ptr = (__u32 *) (x); \ + __u64 ia64_spinlock_val; \ + ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0); \ + if (unlikely(ia64_spinlock_val)) { \ + do { \ + while (*ia64_spinlock_ptr) \ + ia64_barrier(); \ + ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0); \ + } while (ia64_spinlock_val); \ + } \ +} while (0) +#endif /* !ASM_SUPPORTED */ + +#define spin_is_locked(x) ((x)->lock != 0) +#define _raw_spin_unlock(x) do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0) +#define _raw_spin_trylock(x) (cmpxchg_acq(&(x)->lock, 0, 1) == 0) +#define spin_unlock_wait(x) do { barrier(); } while ((x)->lock) + +typedef struct { + volatile unsigned int read_counter : 31; + volatile unsigned int write_lock : 1; +#ifdef CONFIG_PREEMPT + unsigned int break_lock; +#endif +} rwlock_t; +#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 } + +#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0) +#define read_can_lock(rw) (*(volatile int *)(rw) >= 0) +#define write_can_lock(rw) (*(volatile int *)(rw) == 0) + +#define _raw_read_lock(rw) \ +do { \ + rwlock_t *__read_lock_ptr = (rw); \ + \ + while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \ + ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ + while (*(volatile int *)__read_lock_ptr < 0) \ + cpu_relax(); \ + } \ +} while (0) + +#define _raw_read_unlock(rw) \ +do { \ + rwlock_t *__read_lock_ptr = (rw); \ + ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ +} while (0) + +#ifdef ASM_SUPPORTED +#define _raw_write_lock(rw) \ +do { \ + __asm__ __volatile__ ( \ + "mov ar.ccv = r0\n" \ + "dep r29 = -1, r0, 31, 1;;\n" \ + "1:\n" \ + "ld4 r2 = [%0];;\n" \ + "cmp4.eq p0,p7 = r0,r2\n" \ + "(p7) br.cond.spnt.few 1b \n" \ + "cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n" \ + "cmp4.eq p0,p7 = 
r0, r2\n" \ + "(p7) br.cond.spnt.few 1b;;\n" \ + :: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory"); \ +} while(0) + +#define _raw_write_trylock(rw) \ +({ \ + register long result; \ + \ + __asm__ __volatile__ ( \ + "mov ar.ccv = r0\n" \ + "dep r29 = -1, r0, 31, 1;;\n" \ + "cmpxchg4.acq %0 = [%1], r29, ar.ccv\n" \ + : "=r"(result) : "r"(rw) : "ar.ccv", "r29", "memory"); \ + (result == 0); \ +}) + +#else /* !ASM_SUPPORTED */ + +#define _raw_write_lock(l) \ +({ \ + __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \ + __u32 *ia64_write_lock_ptr = (__u32 *) (l); \ + do { \ + while (*ia64_write_lock_ptr) \ + ia64_barrier(); \ + ia64_val = ia64_cmpxchg4_acq(ia64_write_lock_ptr, ia64_set_val, 0); \ + } while (ia64_val); \ +}) + +#define _raw_write_trylock(rw) \ +({ \ + __u64 ia64_val; \ + __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1); \ + ia64_val = ia64_cmpxchg4_acq((__u32 *)(rw), ia64_set_val, 0); \ + (ia64_val == 0); \ +}) + +#endif /* !ASM_SUPPORTED */ + +#define _raw_read_trylock(lock) generic_raw_read_trylock(lock) + +#define _raw_write_unlock(x) \ +({ \ + smp_mb__before_clear_bit(); /* need barrier before releasing lock... */ \ + clear_bit(31, (x)); \ +}) + +#endif /* _ASM_IA64_SPINLOCK_H */ diff --git a/include/asm-ia64/stat.h b/include/asm-ia64/stat.h new file mode 100644 index 000000000000..367bb90cdffa --- /dev/null +++ b/include/asm-ia64/stat.h @@ -0,0 +1,51 @@ +#ifndef _ASM_IA64_STAT_H +#define _ASM_IA64_STAT_H + +/* + * Modified 1998, 1999 + * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co + */ + +struct stat { + unsigned long st_dev; + unsigned long st_ino; + unsigned long st_nlink; + unsigned int st_mode; + unsigned int st_uid; + unsigned int st_gid; + unsigned int __pad0; + unsigned long st_rdev; + unsigned long st_size; + unsigned long st_atime; + unsigned long st_atime_nsec; + unsigned long st_mtime; + unsigned long st_mtime_nsec; + unsigned long st_ctime; + unsigned long st_ctime_nsec; + unsigned long st_blksize; + long st_blocks; + unsigned long __unused[3]; +}; + +#define STAT_HAVE_NSEC 1 + +struct ia64_oldstat { + unsigned int st_dev; + unsigned int st_ino; + unsigned int st_mode; + unsigned int st_nlink; + unsigned int st_uid; + unsigned int st_gid; + unsigned int st_rdev; + unsigned int __pad1; + unsigned long st_size; + unsigned long st_atime; + unsigned long st_mtime; + unsigned long st_ctime; + unsigned int st_blksize; + int st_blocks; + unsigned int __unused1; + unsigned int __unused2; +}; + +#endif /* _ASM_IA64_STAT_H */ diff --git a/include/asm-ia64/statfs.h b/include/asm-ia64/statfs.h new file mode 100644 index 000000000000..811097974f31 --- /dev/null +++ b/include/asm-ia64/statfs.h @@ -0,0 +1,62 @@ +#ifndef _ASM_IA64_STATFS_H +#define _ASM_IA64_STATFS_H + +/* + * Based on <asm-i386/statfs.h>. 
+ * + * Modified 1998, 1999, 2003 + * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co + */ + +#ifndef __KERNEL_STRICT_NAMES +# include <linux/types.h> +typedef __kernel_fsid_t fsid_t; +#endif + +/* + * This is ugly --- we're already 64-bit, so just duplicate the definitions + */ +struct statfs { + long f_type; + long f_bsize; + long f_blocks; + long f_bfree; + long f_bavail; + long f_files; + long f_ffree; + __kernel_fsid_t f_fsid; + long f_namelen; + long f_frsize; + long f_spare[5]; +}; + + +struct statfs64 { + long f_type; + long f_bsize; + long f_blocks; + long f_bfree; + long f_bavail; + long f_files; + long f_ffree; + __kernel_fsid_t f_fsid; + long f_namelen; + long f_frsize; + long f_spare[5]; +}; + +struct compat_statfs64 { + __u32 f_type; + __u32 f_bsize; + __u64 f_blocks; + __u64 f_bfree; + __u64 f_bavail; + __u64 f_files; + __u64 f_ffree; + __kernel_fsid_t f_fsid; + __u32 f_namelen; + __u32 f_frsize; + __u32 f_spare[5]; +} __attribute__((packed)); + +#endif /* _ASM_IA64_STATFS_H */ diff --git a/include/asm-ia64/string.h b/include/asm-ia64/string.h new file mode 100644 index 000000000000..43502d3b57e5 --- /dev/null +++ b/include/asm-ia64/string.h @@ -0,0 +1,22 @@ +#ifndef _ASM_IA64_STRING_H +#define _ASM_IA64_STRING_H + +/* + * Here is where we want to put optimized versions of the string + * routines. + * + * Copyright (C) 1998-2000, 2002 Hewlett-Packard Co + * David Mosberger-Tang <davidm@hpl.hp.com> + */ + +#include <linux/config.h> /* remove this once we remove the A-step workaround... */ + +#define __HAVE_ARCH_STRLEN 1 /* see arch/ia64/lib/strlen.S */ +#define __HAVE_ARCH_MEMSET 1 /* see arch/ia64/lib/memset.S */ +#define __HAVE_ARCH_MEMCPY 1 /* see arch/ia64/lib/memcpy.S */ + +extern __kernel_size_t strlen (const char *); +extern void *memcpy (void *, const void *, __kernel_size_t); +extern void *memset (void *, int, __kernel_size_t); + +#endif /* _ASM_IA64_STRING_H */ diff --git a/include/asm-ia64/suspend.h b/include/asm-ia64/suspend.h new file mode 100644 index 000000000000..b05bbb6074e2 --- /dev/null +++ b/include/asm-ia64/suspend.h @@ -0,0 +1 @@ +/* dummy (must be non-empty to prevent prejudicial removal...) */ diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h new file mode 100644 index 000000000000..6f516e76d1f0 --- /dev/null +++ b/include/asm-ia64/system.h @@ -0,0 +1,295 @@ +#ifndef _ASM_IA64_SYSTEM_H +#define _ASM_IA64_SYSTEM_H + +/* + * System defines. Note that this is included both from .c and .S + * files, so it does only defines, not any C code. This is based + * on information published in the Processor Abstraction Layer + * and the System Abstraction Layer manual. 
+ * + * Copyright (C) 1998-2003 Hewlett-Packard Co + * David Mosberger-Tang <davidm@hpl.hp.com> + * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com> + * Copyright (C) 1999 Don Dugger <don.dugger@intel.com> + */ +#include <linux/config.h> + +#include <asm/kregs.h> +#include <asm/page.h> +#include <asm/pal.h> +#include <asm/percpu.h> + +#define GATE_ADDR __IA64_UL_CONST(0xa000000000000000) +/* + * 0xa000000000000000+2*PERCPU_PAGE_SIZE + * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page) + */ +#define KERNEL_START __IA64_UL_CONST(0xa000000100000000) +#define PERCPU_ADDR (-PERCPU_PAGE_SIZE) + +#ifndef __ASSEMBLY__ + +#include <linux/kernel.h> +#include <linux/types.h> + +struct pci_vector_struct { + __u16 segment; /* PCI Segment number */ + __u16 bus; /* PCI Bus number */ + __u32 pci_id; /* ACPI split 16 bits device, 16 bits function (see section 6.1.1) */ + __u8 pin; /* PCI PIN (0 = A, 1 = B, 2 = C, 3 = D) */ + __u32 irq; /* IRQ assigned */ +}; + +extern struct ia64_boot_param { + __u64 command_line; /* physical address of command line arguments */ + __u64 efi_systab; /* physical address of EFI system table */ + __u64 efi_memmap; /* physical address of EFI memory map */ + __u64 efi_memmap_size; /* size of EFI memory map */ + __u64 efi_memdesc_size; /* size of an EFI memory map descriptor */ + __u32 efi_memdesc_version; /* memory descriptor version */ + struct { + __u16 num_cols; /* number of columns on console output device */ + __u16 num_rows; /* number of rows on console output device */ + __u16 orig_x; /* cursor's x position */ + __u16 orig_y; /* cursor's y position */ + } console_info; + __u64 fpswa; /* physical address of the fpswa interface */ + __u64 initrd_start; + __u64 initrd_size; +} *ia64_boot_param; + +/* + * Macros to force memory ordering. In these descriptions, "previous" + * and "subsequent" refer to program order; "visible" means that all + * architecturally visible effects of a memory access have occurred + * (at a minimum, this means the memory has been read or written). + * + * wmb(): Guarantees that all preceding stores to memory- + * like regions are visible before any subsequent + * stores and that all following stores will be + * visible only after all previous stores. + * rmb(): Like wmb(), but for reads. + * mb(): wmb()/rmb() combo, i.e., all previous memory + * accesses are visible before all subsequent + * accesses and vice versa. This is also known as + * a "fence." + * + * Note: "mb()" and its variants cannot be used as a fence to order + * accesses to memory mapped I/O registers. For that, mf.a needs to + * be used. However, we don't want to always use mf.a because (a) + * it's (presumably) much slower than mf and (b) mf.a is supported for + * sequential memory pages only. + */ +#define mb() ia64_mf() +#define rmb() mb() +#define wmb() mb() +#define read_barrier_depends() do { } while(0) + +#ifdef CONFIG_SMP +# define smp_mb() mb() +# define smp_rmb() rmb() +# define smp_wmb() wmb() +# define smp_read_barrier_depends() read_barrier_depends() +#else +# define smp_mb() barrier() +# define smp_rmb() barrier() +# define smp_wmb() barrier() +# define smp_read_barrier_depends() do { } while(0) +#endif + +/* + * XXX check on these---I suspect what Linus really wants here is + * acquire vs release semantics but we can't discuss this stuff with + * Linus just yet. Grrr... 
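+ *
+ * For reference, the classic pairing implied by the wmb()/rmb()
+ * descriptions above looks like this (flag, data, and v being
+ * hypothetical shared/local variables):
+ *
+ *	writer:  data = 42; wmb(); flag = 1;
+ *	reader:  while (!flag) cpu_relax(); rmb(); v = data;
+ *
+ * wmb() keeps the store to data visible before the store to flag; rmb()
+ * keeps the read of flag ordered before the read of data.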
+ */ +#define set_mb(var, value) do { (var) = (value); mb(); } while (0) +#define set_wmb(var, value) do { (var) = (value); mb(); } while (0) + +#define safe_halt() ia64_pal_halt_light() /* PAL_HALT_LIGHT */ + +/* + * The group barrier in front of the rsm & ssm are necessary to ensure + * that none of the previous instructions in the same group are + * affected by the rsm/ssm. + */ +/* For spinlocks etc */ + +/* + * - clearing psr.i is implicitly serialized (visible by next insn) + * - setting psr.i requires data serialization + * - we need a stop-bit before reading PSR because we sometimes + * write a floating-point register right before reading the PSR + * and that writes to PSR.mfl + */ +#define __local_irq_save(x) \ +do { \ + ia64_stop(); \ + (x) = ia64_getreg(_IA64_REG_PSR); \ + ia64_stop(); \ + ia64_rsm(IA64_PSR_I); \ +} while (0) + +#define __local_irq_disable() \ +do { \ + ia64_stop(); \ + ia64_rsm(IA64_PSR_I); \ +} while (0) + +#define __local_irq_restore(x) ia64_intrin_local_irq_restore((x) & IA64_PSR_I) + +#ifdef CONFIG_IA64_DEBUG_IRQ + + extern unsigned long last_cli_ip; + +# define __save_ip() last_cli_ip = ia64_getreg(_IA64_REG_IP) + +# define local_irq_save(x) \ +do { \ + unsigned long psr; \ + \ + __local_irq_save(psr); \ + if (psr & IA64_PSR_I) \ + __save_ip(); \ + (x) = psr; \ +} while (0) + +# define local_irq_disable() do { unsigned long x; local_irq_save(x); } while (0) + +# define local_irq_restore(x) \ +do { \ + unsigned long old_psr, psr = (x); \ + \ + local_save_flags(old_psr); \ + __local_irq_restore(psr); \ + if ((old_psr & IA64_PSR_I) && !(psr & IA64_PSR_I)) \ + __save_ip(); \ +} while (0) + +#else /* !CONFIG_IA64_DEBUG_IRQ */ +# define local_irq_save(x) __local_irq_save(x) +# define local_irq_disable() __local_irq_disable() +# define local_irq_restore(x) __local_irq_restore(x) +#endif /* !CONFIG_IA64_DEBUG_IRQ */ + +#define local_irq_enable() ({ ia64_stop(); ia64_ssm(IA64_PSR_I); ia64_srlz_d(); }) +#define local_save_flags(flags) ({ ia64_stop(); (flags) = ia64_getreg(_IA64_REG_PSR); }) + +#define irqs_disabled() \ +({ \ + unsigned long __ia64_id_flags; \ + local_save_flags(__ia64_id_flags); \ + (__ia64_id_flags & IA64_PSR_I) == 0; \ +}) + +#ifdef __KERNEL__ + +#define prepare_to_switch() do { } while(0) + +#ifdef CONFIG_IA32_SUPPORT +# define IS_IA32_PROCESS(regs) (ia64_psr(regs)->is != 0) +#else +# define IS_IA32_PROCESS(regs) 0 +struct task_struct; +static inline void ia32_save_state(struct task_struct *t __attribute__((unused))){} +static inline void ia32_load_state(struct task_struct *t __attribute__((unused))){} +#endif + +/* + * Context switch from one thread to another. If the two threads have + * different address spaces, schedule() has already taken care of + * switching to the new address space by calling switch_mm(). + * + * Disabling access to the fph partition and the debug-register + * context switch MUST be done before calling ia64_switch_to() since a + * newly created thread returns directly to + * ia64_ret_from_syscall_clear_r8. 
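+ *
+ * (IA64_HAS_EXTRA_STATE() below captures exactly this: debug-register,
+ * perfmon, and IA-32 state get saved/loaded via ia64_save_extra() and
+ * ia64_load_extra() before ia64_switch_to() is invoked.)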
+ */ +extern struct task_struct *ia64_switch_to (void *next_task); + +struct task_struct; + +extern void ia64_save_extra (struct task_struct *task); +extern void ia64_load_extra (struct task_struct *task); + +#ifdef CONFIG_PERFMON + DECLARE_PER_CPU(unsigned long, pfm_syst_info); +# define PERFMON_IS_SYSWIDE() (__get_cpu_var(pfm_syst_info) & 0x1) +#else +# define PERFMON_IS_SYSWIDE() (0) +#endif + +#define IA64_HAS_EXTRA_STATE(t) \ + ((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID) \ + || IS_IA32_PROCESS(ia64_task_regs(t)) || PERFMON_IS_SYSWIDE()) + +#define __switch_to(prev,next,last) do { \ + if (IA64_HAS_EXTRA_STATE(prev)) \ + ia64_save_extra(prev); \ + if (IA64_HAS_EXTRA_STATE(next)) \ + ia64_load_extra(next); \ + ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next); \ + (last) = ia64_switch_to((next)); \ +} while (0) + +#ifdef CONFIG_SMP +/* + * In the SMP case, we save the fph state when context-switching away from a thread that + * modified fph. This way, when the thread gets scheduled on another CPU, the CPU can + * pick up the state from task->thread.fph, avoiding the complication of having to fetch + * the latest fph state from another CPU. In other words: eager save, lazy restore. + */ +# define switch_to(prev,next,last) do { \ + if (ia64_psr(ia64_task_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) { \ + ia64_psr(ia64_task_regs(prev))->mfh = 0; \ + (prev)->thread.flags |= IA64_THREAD_FPH_VALID; \ + __ia64_save_fpu((prev)->thread.fph); \ + } \ + __switch_to(prev, next, last); \ +} while (0) +#else +# define switch_to(prev,next,last) __switch_to(prev, next, last) +#endif + +/* + * On IA-64, we don't want to hold the runqueue's lock during the low-level context-switch, + * because that could cause a deadlock. Here is an example by Erich Focht: + * + * Example: + * CPU#0: + * schedule() + * -> spin_lock_irq(&rq->lock) + * -> context_switch() + * -> wrap_mmu_context() + * -> read_lock(&tasklist_lock) + * + * CPU#1: + * sys_wait4() or release_task() or forget_original_parent() + * -> write_lock(&tasklist_lock) + * -> do_notify_parent() + * -> wake_up_parent() + * -> try_to_wake_up() + * -> spin_lock_irq(&parent_rq->lock) + * + * If the parent's rq happens to be on CPU#0, we'll wait for the rq->lock + * of that CPU which will not be released, because there we wait for the + * tasklist_lock to become available. + */ +#define prepare_arch_switch(rq, next) \ +do { \ + spin_lock(&(next)->switch_lock); \ + spin_unlock(&(rq)->lock); \ +} while (0) +#define finish_arch_switch(rq, prev) spin_unlock_irq(&(prev)->switch_lock) +#define task_running(rq, p) ((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock)) + +#define ia64_platform_is(x) (strcmp(x, platform_name) == 0) + +void cpu_idle_wait(void); + +#define arch_align_stack(x) (x) + +#endif /* __KERNEL__ */ + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_IA64_SYSTEM_H */ diff --git a/include/asm-ia64/termbits.h b/include/asm-ia64/termbits.h new file mode 100644 index 000000000000..b9e843f7dc42 --- /dev/null +++ b/include/asm-ia64/termbits.h @@ -0,0 +1,182 @@ +#ifndef _ASM_IA64_TERMBITS_H +#define _ASM_IA64_TERMBITS_H + +/* + * Based on <asm-i386/termbits.h>. 
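+ *
+ * User code manipulates these flags through tcgetattr()/tcsetattr(); e.g.,
+ * a sketch that puts a terminal into non-canonical, no-echo mode (fd is a
+ * hypothetical terminal descriptor, assuming the usual libc wrappers):
+ *
+ *	struct termios t;
+ *	tcgetattr(fd, &t);
+ *	t.c_lflag &= ~(ICANON | ECHO);
+ *	t.c_cc[VMIN] = 1;
+ *	t.c_cc[VTIME] = 0;
+ *	tcsetattr(fd, TCSANOW, &t);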
+ * + * Modified 1999 + * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co + * + * 99/01/28 Added new baudrates + */ + +#include <linux/posix_types.h> + +typedef unsigned char cc_t; +typedef unsigned int speed_t; +typedef unsigned int tcflag_t; + +#define NCCS 19 +struct termios { + tcflag_t c_iflag; /* input mode flags */ + tcflag_t c_oflag; /* output mode flags */ + tcflag_t c_cflag; /* control mode flags */ + tcflag_t c_lflag; /* local mode flags */ + cc_t c_line; /* line discipline */ + cc_t c_cc[NCCS]; /* control characters */ +}; + +/* c_cc characters */ +#define VINTR 0 +#define VQUIT 1 +#define VERASE 2 +#define VKILL 3 +#define VEOF 4 +#define VTIME 5 +#define VMIN 6 +#define VSWTC 7 +#define VSTART 8 +#define VSTOP 9 +#define VSUSP 10 +#define VEOL 11 +#define VREPRINT 12 +#define VDISCARD 13 +#define VWERASE 14 +#define VLNEXT 15 +#define VEOL2 16 + +/* c_iflag bits */ +#define IGNBRK 0000001 +#define BRKINT 0000002 +#define IGNPAR 0000004 +#define PARMRK 0000010 +#define INPCK 0000020 +#define ISTRIP 0000040 +#define INLCR 0000100 +#define IGNCR 0000200 +#define ICRNL 0000400 +#define IUCLC 0001000 +#define IXON 0002000 +#define IXANY 0004000 +#define IXOFF 0010000 +#define IMAXBEL 0020000 +#define IUTF8 0040000 + +/* c_oflag bits */ +#define OPOST 0000001 +#define OLCUC 0000002 +#define ONLCR 0000004 +#define OCRNL 0000010 +#define ONOCR 0000020 +#define ONLRET 0000040 +#define OFILL 0000100 +#define OFDEL 0000200 +#define NLDLY 0000400 +#define NL0 0000000 +#define NL1 0000400 +#define CRDLY 0003000 +#define CR0 0000000 +#define CR1 0001000 +#define CR2 0002000 +#define CR3 0003000 +#define TABDLY 0014000 +#define TAB0 0000000 +#define TAB1 0004000 +#define TAB2 0010000 +#define TAB3 0014000 +#define XTABS 0014000 +#define BSDLY 0020000 +#define BS0 0000000 +#define BS1 0020000 +#define VTDLY 0040000 +#define VT0 0000000 +#define VT1 0040000 +#define FFDLY 0100000 +#define FF0 0000000 +#define FF1 0100000 + +/* c_cflag bit meaning */ +#define CBAUD 0010017 +#define B0 0000000 /* hang up */ +#define B50 0000001 +#define B75 0000002 +#define B110 0000003 +#define B134 0000004 +#define B150 0000005 +#define B200 0000006 +#define B300 0000007 +#define B600 0000010 +#define B1200 0000011 +#define B1800 0000012 +#define B2400 0000013 +#define B4800 0000014 +#define B9600 0000015 +#define B19200 0000016 +#define B38400 0000017 +#define EXTA B19200 +#define EXTB B38400 +#define CSIZE 0000060 +#define CS5 0000000 +#define CS6 0000020 +#define CS7 0000040 +#define CS8 0000060 +#define CSTOPB 0000100 +#define CREAD 0000200 +#define PARENB 0000400 +#define PARODD 0001000 +#define HUPCL 0002000 +#define CLOCAL 0004000 +#define CBAUDEX 0010000 +#define B57600 0010001 +#define B115200 0010002 +#define B230400 0010003 +#define B460800 0010004 +#define B500000 0010005 +#define B576000 0010006 +#define B921600 0010007 +#define B1000000 0010010 +#define B1152000 0010011 +#define B1500000 0010012 +#define B2000000 0010013 +#define B2500000 0010014 +#define B3000000 0010015 +#define B3500000 0010016 +#define B4000000 0010017 +#define CIBAUD 002003600000 /* input baud rate (not used) */ +#define CMSPAR 010000000000 /* mark or space (stick) parity */ +#define CRTSCTS 020000000000 /* flow control */ + +/* c_lflag bits */ +#define ISIG 0000001 +#define ICANON 0000002 +#define XCASE 0000004 +#define ECHO 0000010 +#define ECHOE 0000020 +#define ECHOK 0000040 +#define ECHONL 0000100 +#define NOFLSH 0000200 +#define TOSTOP 0000400 +#define ECHOCTL 0001000 +#define ECHOPRT 0002000 +#define 
ECHOKE 0004000 +#define FLUSHO 0010000 +#define PENDIN 0040000 +#define IEXTEN 0100000 + +/* tcflow() and TCXONC use these */ +#define TCOOFF 0 +#define TCOON 1 +#define TCIOFF 2 +#define TCION 3 + +/* tcflush() and TCFLSH use these */ +#define TCIFLUSH 0 +#define TCOFLUSH 1 +#define TCIOFLUSH 2 + +/* tcsetattr uses these */ +#define TCSANOW 0 +#define TCSADRAIN 1 +#define TCSAFLUSH 2 + +#endif /* _ASM_IA64_TERMBITS_H */ diff --git a/include/asm-ia64/termios.h b/include/asm-ia64/termios.h new file mode 100644 index 000000000000..42c95693240c --- /dev/null +++ b/include/asm-ia64/termios.h @@ -0,0 +1,113 @@ +#ifndef _ASM_IA64_TERMIOS_H +#define _ASM_IA64_TERMIOS_H + +/* + * Modified 1999 + * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co + * + * 99/01/28 Added N_IRDA and N_SMSBLOCK + */ + +#include <asm/termbits.h> +#include <asm/ioctls.h> + +struct winsize { + unsigned short ws_row; + unsigned short ws_col; + unsigned short ws_xpixel; + unsigned short ws_ypixel; +}; + +#define NCC 8 +struct termio { + unsigned short c_iflag; /* input mode flags */ + unsigned short c_oflag; /* output mode flags */ + unsigned short c_cflag; /* control mode flags */ + unsigned short c_lflag; /* local mode flags */ + unsigned char c_line; /* line discipline */ + unsigned char c_cc[NCC]; /* control characters */ +}; + +/* modem lines */ +#define TIOCM_LE 0x001 +#define TIOCM_DTR 0x002 +#define TIOCM_RTS 0x004 +#define TIOCM_ST 0x008 +#define TIOCM_SR 0x010 +#define TIOCM_CTS 0x020 +#define TIOCM_CAR 0x040 +#define TIOCM_RNG 0x080 +#define TIOCM_DSR 0x100 +#define TIOCM_CD TIOCM_CAR +#define TIOCM_RI TIOCM_RNG +#define TIOCM_OUT1 0x2000 +#define TIOCM_OUT2 0x4000 +#define TIOCM_LOOP 0x8000 + +/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ + +/* line disciplines */ +#define N_TTY 0 +#define N_SLIP 1 +#define N_MOUSE 2 +#define N_PPP 3 +#define N_STRIP 4 +#define N_AX25 5 +#define N_X25 6 /* X.25 async */ +#define N_6PACK 7 +#define N_MASC 8 /* Reserved for Mobitex module <kaz@cafe.net> */ +#define N_R3964 9 /* Reserved for Simatic R3964 module */ +#define N_PROFIBUS_FDL 10 /* Reserved for Profibus <Dave@mvhi.com> */ +#define N_IRDA 11 /* Linux IR - http://irda.sourceforge.net/ */ +#define N_SMSBLOCK 12 /* SMS block mode - for talking to GSM data cards about SMS msgs */ +#define N_HDLC 13 /* synchronous HDLC */ +#define N_SYNC_PPP 14 /* synchronous PPP */ +#define N_HCI 15 /* Bluetooth HCI UART */ + +# ifdef __KERNEL__ + +/* intr=^C quit=^\ erase=del kill=^U + eof=^D vtime=\0 vmin=\1 sxtc=\0 + start=^Q stop=^S susp=^Z eol=\0 + reprint=^R discard=^U werase=^W lnext=^V + eol2=\0 +*/ +#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0" + +/* + * Translate a "termio" structure into a "termios". Ugh. + */ +#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \ + unsigned short __tmp; \ + get_user(__tmp,&(termio)->x); \ + *(unsigned short *) &(termios)->x = __tmp; \ +} + +#define user_termio_to_kernel_termios(termios, termio) \ +({ \ + SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \ + SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \ + SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \ + SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \ + copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \ +}) + +/* + * Translate a "termios" structure into a "termio". Ugh. 
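+ * Note that struct termio stores its flags as unsigned short, so only the
+ * low 16 bits of each tcflag_t survive the round trip.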
+ */ +#define kernel_termios_to_user_termio(termio, termios) \ +({ \ + put_user((termios)->c_iflag, &(termio)->c_iflag); \ + put_user((termios)->c_oflag, &(termio)->c_oflag); \ + put_user((termios)->c_cflag, &(termio)->c_cflag); \ + put_user((termios)->c_lflag, &(termio)->c_lflag); \ + put_user((termios)->c_line, &(termio)->c_line); \ + copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \ +}) + +#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios)) +#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios)) + +# endif /* __KERNEL__ */ + +#endif /* _ASM_IA64_TERMIOS_H */ diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h new file mode 100644 index 000000000000..8d5b7e77028c --- /dev/null +++ b/include/asm-ia64/thread_info.h @@ -0,0 +1,94 @@ +/* + * Copyright (C) 2002-2003 Hewlett-Packard Co + * David Mosberger-Tang <davidm@hpl.hp.com> + */ +#ifndef _ASM_IA64_THREAD_INFO_H +#define _ASM_IA64_THREAD_INFO_H + +#include <asm/offsets.h> +#include <asm/processor.h> +#include <asm/ptrace.h> + +#define PREEMPT_ACTIVE_BIT 30 +#define PREEMPT_ACTIVE (1 << PREEMPT_ACTIVE_BIT) + +#ifndef __ASSEMBLY__ + +/* + * On IA-64, we want to keep the task structure and kernel stack together, so they can be + * mapped by a single TLB entry and so they can be addressed by the "current" pointer + * without having to do pointer masking. + */ +struct thread_info { + struct task_struct *task; /* XXX not really needed, except for dup_task_struct() */ + struct exec_domain *exec_domain;/* execution domain */ + __u32 flags; /* thread_info flags (see TIF_*) */ + __u32 cpu; /* current CPU */ + mm_segment_t addr_limit; /* user-level address space limit */ + __s32 preempt_count; /* 0=premptable, <0=BUG; will also serve as bh-counter */ + struct restart_block restart_block; + struct { + int signo; + int code; + void __user *addr; + unsigned long start_time; + pid_t pid; + } sigdelayed; /* Saved information for TIF_SIGDELAYED */ +}; + +#define THREAD_SIZE KERNEL_STACK_SIZE + +#define INIT_THREAD_INFO(tsk) \ +{ \ + .task = &tsk, \ + .exec_domain = &default_exec_domain, \ + .flags = 0, \ + .cpu = 0, \ + .addr_limit = KERNEL_DS, \ + .preempt_count = 0, \ + .restart_block = { \ + .fn = do_no_restart_syscall, \ + }, \ +} + +/* how to get the thread information struct from C */ +#define current_thread_info() ((struct thread_info *) ((char *) current + IA64_TASK_SIZE)) +#define alloc_thread_info(tsk) ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE)) +#define free_thread_info(ti) /* nothing */ + +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR +#define alloc_task_struct() ((task_t *)__get_free_pages(GFP_KERNEL, KERNEL_STACK_SIZE_ORDER)) +#define free_task_struct(tsk) free_pages((unsigned long) (tsk), KERNEL_STACK_SIZE_ORDER) + +#endif /* !__ASSEMBLY */ + +/* + * thread information flags + * - these are process state flags that various assembly files may need to access + * - pending work-to-be-done flags are in least-significant 16 bits, other flags + * in top 16 bits + */ +#define TIF_NOTIFY_RESUME 0 /* resumption notification requested */ +#define TIF_SIGPENDING 1 /* signal pending */ +#define TIF_NEED_RESCHED 2 /* rescheduling necessary */ +#define TIF_SYSCALL_TRACE 3 /* syscall trace active */ +#define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */ +#define TIF_SIGDELAYED 5 /* signal delayed from MCA/INIT/NMI/PMI context */ +#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ +#define TIF_MEMDIE 17 + 
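+
+/*
+ * The _TIF_* masks below are simply (1 << TIF_*); a C-level test is, in
+ * sketch form:
+ *
+ *	if (current_thread_info()->flags & _TIF_NEED_RESCHED)
+ *		schedule();
+ */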
+#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) +#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) +#define _TIF_SYSCALL_TRACEAUDIT (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT) +#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) +#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) +#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) +#define _TIF_SIGDELAYED (1 << TIF_SIGDELAYED) +#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) + +/* "work to do on user-return" bits */ +#define TIF_ALLWORK_MASK (_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SIGDELAYED) +/* like TIF_ALLWORK_BITS but sans TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT */ +#define TIF_WORK_MASK (TIF_ALLWORK_MASK&~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)) + +#endif /* _ASM_IA64_THREAD_INFO_H */ diff --git a/include/asm-ia64/timex.h b/include/asm-ia64/timex.h new file mode 100644 index 000000000000..414aae060440 --- /dev/null +++ b/include/asm-ia64/timex.h @@ -0,0 +1,40 @@ +#ifndef _ASM_IA64_TIMEX_H +#define _ASM_IA64_TIMEX_H + +/* + * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co + * David Mosberger-Tang <davidm@hpl.hp.com> + */ +/* + * 2001/01/18 davidm Removed CLOCK_TICK_RATE. It makes no sense on IA-64. + * Also removed cacheflush_time as it's entirely unused. + */ + +#include <asm/intrinsics.h> +#include <asm/processor.h> + +typedef unsigned long cycles_t; + +/* + * For performance reasons, we don't want to define CLOCK_TICK_TRATE as + * local_cpu_data->itc_rate. Fortunately, we don't have to, either: according to George + * Anzinger, 1/CLOCK_TICK_RATE is taken as the resolution of the timer clock. The time + * calculation assumes that you will use enough of these so that your tick size <= 1/HZ. + * If the calculation shows that your CLOCK_TICK_RATE can not supply exactly 1/HZ ticks, + * the actual value is calculated and used to update the wall clock each jiffie. Setting + * the CLOCK_TICK_RATE to x*HZ insures that the calculation will find no errors. Hence we + * pick a multiple of HZ which gives us a (totally virtual) CLOCK_TICK_RATE of about + * 100MHz. + */ +#define CLOCK_TICK_RATE (HZ * 100000UL) + +static inline cycles_t +get_cycles (void) +{ + cycles_t ret; + + ret = ia64_getreg(_IA64_REG_AR_ITC); + return ret; +} + +#endif /* _ASM_IA64_TIMEX_H */ diff --git a/include/asm-ia64/tlb.h b/include/asm-ia64/tlb.h new file mode 100644 index 000000000000..3a9a6d1be75c --- /dev/null +++ b/include/asm-ia64/tlb.h @@ -0,0 +1,245 @@ +#ifndef _ASM_IA64_TLB_H +#define _ASM_IA64_TLB_H +/* + * Based on <asm-generic/tlb.h>. + * + * Copyright (C) 2002-2003 Hewlett-Packard Co + * David Mosberger-Tang <davidm@hpl.hp.com> + */ +/* + * Removing a translation from a page table (including TLB-shootdown) is a four-step + * procedure: + * + * (1) Flush (virtual) caches --- ensures virtual memory is coherent with kernel memory + * (this is a no-op on ia64). + * (2) Clear the relevant portions of the page-table + * (3) Flush the TLBs --- ensures that stale content is gone from CPU TLBs + * (4) Release the pages that were freed up in step (2). + * + * Note that the ordering of these steps is crucial to avoid races on MP machines. + * + * The Linux kernel defines several platform-specific hooks for TLB-shootdown. 
When + * unmapping a portion of the virtual address space, these hooks are called according to + * the following template: + * + * tlb <- tlb_gather_mmu(mm, full_mm_flush); // start unmap for address space MM + * { + * for each vma that needs a shootdown do { + * tlb_start_vma(tlb, vma); + * for each page-table-entry PTE that needs to be removed do { + * tlb_remove_tlb_entry(tlb, pte, address); + * if (pte refers to a normal page) { + * tlb_remove_page(tlb, page); + * } + * } + * tlb_end_vma(tlb, vma); + * } + * } + * tlb_finish_mmu(tlb, start, end); // finish unmap for address space MM + */ +#include <linux/config.h> +#include <linux/mm.h> +#include <linux/pagemap.h> +#include <linux/swap.h> + +#include <asm/pgalloc.h> +#include <asm/processor.h> +#include <asm/tlbflush.h> +#include <asm/machvec.h> + +#ifdef CONFIG_SMP +# define FREE_PTE_NR 2048 +# define tlb_fast_mode(tlb) ((tlb)->nr == ~0U) +#else +# define FREE_PTE_NR 0 +# define tlb_fast_mode(tlb) (1) +#endif + +struct mmu_gather { + struct mm_struct *mm; + unsigned int nr; /* == ~0U => fast mode */ + unsigned char fullmm; /* non-zero means full mm flush */ + unsigned char need_flush; /* really unmapped some PTEs? */ + unsigned long freed; /* number of pages freed */ + unsigned long start_addr; + unsigned long end_addr; + struct page *pages[FREE_PTE_NR]; +}; + +/* Users of the generic TLB shootdown code must declare this storage space. */ +DECLARE_PER_CPU(struct mmu_gather, mmu_gathers); + +/* + * Flush the TLB for address range START to END and, if not in fast mode, release the + * freed pages that where gathered up to this point. + */ +static inline void +ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end) +{ + unsigned int nr; + + if (!tlb->need_flush) + return; + tlb->need_flush = 0; + + if (tlb->fullmm) { + /* + * Tearing down the entire address space. This happens both as a result + * of exit() and execve(). The latter case necessitates the call to + * flush_tlb_mm() here. + */ + flush_tlb_mm(tlb->mm); + } else if (unlikely (end - start >= 1024*1024*1024*1024UL + || REGION_NUMBER(start) != REGION_NUMBER(end - 1))) + { + /* + * If we flush more than a tera-byte or across regions, we're probably + * better off just flushing the entire TLB(s). This should be very rare + * and is not worth optimizing for. + */ + flush_tlb_all(); + } else { + /* + * XXX fix me: flush_tlb_range() should take an mm pointer instead of a + * vma pointer. + */ + struct vm_area_struct vma; + + vma.vm_mm = tlb->mm; + /* flush the address range from the tlb: */ + flush_tlb_range(&vma, start, end); + /* now flush the virt. page-table area mapping the address range: */ + flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end)); + } + + /* lastly, release the freed pages */ + nr = tlb->nr; + if (!tlb_fast_mode(tlb)) { + unsigned long i; + tlb->nr = 0; + tlb->start_addr = ~0UL; + for (i = 0; i < nr; ++i) + free_page_and_swap_cache(tlb->pages[i]); + } +} + +/* + * Return a pointer to an initialized struct mmu_gather. + */ +static inline struct mmu_gather * +tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush) +{ + struct mmu_gather *tlb = &__get_cpu_var(mmu_gathers); + + tlb->mm = mm; + /* + * Use fast mode if only 1 CPU is online. + * + * It would be tempting to turn on fast-mode for full_mm_flush as well. 
But this + * doesn't work because of speculative accesses and software prefetching: the page + * table of "mm" may (and usually is) the currently active page table and even + * though the kernel won't do any user-space accesses during the TLB shoot down, a + * compiler might use speculation or lfetch.fault on what happens to be a valid + * user-space address. This in turn could trigger a TLB miss fault (or a VHPT + * walk) and re-insert a TLB entry we just removed. Slow mode avoids such + * problems. (We could make fast-mode work by switching the current task to a + * different "mm" during the shootdown.) --davidm 08/02/2002 + */ + tlb->nr = (num_online_cpus() == 1) ? ~0U : 0; + tlb->fullmm = full_mm_flush; + tlb->freed = 0; + tlb->start_addr = ~0UL; + return tlb; +} + +/* + * Called at the end of the shootdown operation to free up any resources that were + * collected. The page table lock is still held at this point. + */ +static inline void +tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end) +{ + unsigned long freed = tlb->freed; + struct mm_struct *mm = tlb->mm; + unsigned long rss = get_mm_counter(mm, rss); + + if (rss < freed) + freed = rss; + add_mm_counter(mm, rss, -freed); + /* + * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and + * tlb->end_addr. + */ + ia64_tlb_flush_mmu(tlb, start, end); + + /* keep the page table cache within bounds */ + check_pgt_cache(); +} + +static inline unsigned int +tlb_is_full_mm(struct mmu_gather *tlb) +{ + return tlb->fullmm; +} + +/* + * Logically, this routine frees PAGE. On MP machines, the actual freeing of the page + * must be delayed until after the TLB has been flushed (see comments at the beginning of + * this file). + */ +static inline void +tlb_remove_page (struct mmu_gather *tlb, struct page *page) +{ + tlb->need_flush = 1; + + if (tlb_fast_mode(tlb)) { + free_page_and_swap_cache(page); + return; + } + tlb->pages[tlb->nr++] = page; + if (tlb->nr >= FREE_PTE_NR) + ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr); +} + +/* + * Remove TLB entry for PTE mapped at virtual address ADDRESS. This is called for any + * PTE, not just those pointing to (normal) physical memory. 
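+ *
+ * It merely records the range still to be flushed (start_addr/end_addr);
+ * the actual TLB flush happens later, in ia64_tlb_flush_mmu().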
+ */ +static inline void +__tlb_remove_tlb_entry (struct mmu_gather *tlb, pte_t *ptep, unsigned long address) +{ + if (tlb->start_addr == ~0UL) + tlb->start_addr = address; + tlb->end_addr = address + PAGE_SIZE; +} + +#define tlb_migrate_finish(mm) platform_tlb_migrate_finish(mm) + +#define tlb_start_vma(tlb, vma) do { } while (0) +#define tlb_end_vma(tlb, vma) do { } while (0) + +#define tlb_remove_tlb_entry(tlb, ptep, addr) \ +do { \ + tlb->need_flush = 1; \ + __tlb_remove_tlb_entry(tlb, ptep, addr); \ +} while (0) + +#define pte_free_tlb(tlb, ptep) \ +do { \ + tlb->need_flush = 1; \ + __pte_free_tlb(tlb, ptep); \ +} while (0) + +#define pmd_free_tlb(tlb, ptep) \ +do { \ + tlb->need_flush = 1; \ + __pmd_free_tlb(tlb, ptep); \ +} while (0) + +#define pud_free_tlb(tlb, pudp) \ +do { \ + tlb->need_flush = 1; \ + __pud_free_tlb(tlb, pudp); \ +} while (0) + +#endif /* _ASM_IA64_TLB_H */ diff --git a/include/asm-ia64/tlbflush.h b/include/asm-ia64/tlbflush.h new file mode 100644 index 000000000000..b65c62702724 --- /dev/null +++ b/include/asm-ia64/tlbflush.h @@ -0,0 +1,99 @@ +#ifndef _ASM_IA64_TLBFLUSH_H +#define _ASM_IA64_TLBFLUSH_H + +/* + * Copyright (C) 2002 Hewlett-Packard Co + * David Mosberger-Tang <davidm@hpl.hp.com> + */ + +#include <linux/config.h> + +#include <linux/mm.h> + +#include <asm/intrinsics.h> +#include <asm/mmu_context.h> +#include <asm/page.h> + +/* + * Now for some TLB flushing routines. This is the kind of stuff that + * can be very expensive, so try to avoid them whenever possible. + */ + +/* + * Flush everything (kernel mapping may also have changed due to + * vmalloc/vfree). + */ +extern void local_flush_tlb_all (void); + +#ifdef CONFIG_SMP + extern void smp_flush_tlb_all (void); + extern void smp_flush_tlb_mm (struct mm_struct *mm); +# define flush_tlb_all() smp_flush_tlb_all() +#else +# define flush_tlb_all() local_flush_tlb_all() +#endif + +static inline void +local_finish_flush_tlb_mm (struct mm_struct *mm) +{ + if (mm == current->active_mm) + activate_context(mm); +} + +/* + * Flush a specified user mapping. This is called, e.g., as a result of fork() and + * exit(). fork() ends up here because the copy-on-write mechanism needs to write-protect + * the PTEs of the parent task. + */ +static inline void +flush_tlb_mm (struct mm_struct *mm) +{ + if (!mm) + return; + + mm->context = 0; + + if (atomic_read(&mm->mm_users) == 0) + return; /* happens as a result of exit_mmap() */ + +#ifdef CONFIG_SMP + smp_flush_tlb_mm(mm); +#else + local_finish_flush_tlb_mm(mm); +#endif +} + +extern void flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end); + +/* + * Page-granular tlb flush. + */ +static inline void +flush_tlb_page (struct vm_area_struct *vma, unsigned long addr) +{ +#ifdef CONFIG_SMP + flush_tlb_range(vma, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE); +#else + if (vma->vm_mm == current->active_mm) + ia64_ptcl(addr, (PAGE_SHIFT << 2)); + else + vma->vm_mm->context = 0; +#endif +} + +/* + * Flush the TLB entries mapping the virtually mapped linear page + * table corresponding to address range [START-END). + */ +static inline void +flush_tlb_pgtables (struct mm_struct *mm, unsigned long start, unsigned long end) +{ + /* + * Deprecated. The virtual page table is now flushed via the normal gather/flush + * interface (see tlb.h). 
+ */ +} + +#define flush_tlb_kernel_range(start, end) flush_tlb_all() /* XXX fix me */ + +#endif /* _ASM_IA64_TLBFLUSH_H */ diff --git a/include/asm-ia64/topology.h b/include/asm-ia64/topology.h new file mode 100644 index 000000000000..21cf351fd05c --- /dev/null +++ b/include/asm-ia64/topology.h @@ -0,0 +1,90 @@ +/* + * linux/include/asm-ia64/topology.h + * + * Copyright (C) 2002, Erich Focht, NEC + * + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ +#ifndef _ASM_IA64_TOPOLOGY_H +#define _ASM_IA64_TOPOLOGY_H + +#include <asm/acpi.h> +#include <asm/numa.h> +#include <asm/smp.h> + +#ifdef CONFIG_NUMA +/* + * Returns the number of the node containing CPU 'cpu' + */ +#define cpu_to_node(cpu) (int)(cpu_to_node_map[cpu]) + +/* + * Returns a bitmask of CPUs on Node 'node'. + */ +#define node_to_cpumask(node) (node_to_cpu_mask[node]) + +/* + * Returns the number of the node containing Node 'nid'. + * Not implemented here. Multi-level hierarchies detected with + * the help of node_distance(). + */ +#define parent_node(nid) (nid) + +/* + * Returns the number of the first CPU on Node 'node'. + */ +#define node_to_first_cpu(node) (__ffs(node_to_cpumask(node))) + +void build_cpu_to_node_map(void); + +/* sched_domains SD_NODE_INIT for IA64 NUMA machines */ +#define SD_NODE_INIT (struct sched_domain) { \ + .span = CPU_MASK_NONE, \ + .parent = NULL, \ + .groups = NULL, \ + .min_interval = 80, \ + .max_interval = 320, \ + .busy_factor = 320, \ + .imbalance_pct = 125, \ + .cache_hot_time = (10*1000000), \ + .cache_nice_tries = 1, \ + .per_cpu_gain = 100, \ + .flags = SD_LOAD_BALANCE \ + | SD_BALANCE_EXEC \ + | SD_BALANCE_NEWIDLE \ + | SD_WAKE_IDLE \ + | SD_WAKE_BALANCE, \ + .last_balance = jiffies, \ + .balance_interval = 1, \ + .nr_balance_failed = 0, \ +} + +/* sched_domains SD_ALLNODES_INIT for IA64 NUMA machines */ +#define SD_ALLNODES_INIT (struct sched_domain) { \ + .span = CPU_MASK_NONE, \ + .parent = NULL, \ + .groups = NULL, \ + .min_interval = 80, \ + .max_interval = 320, \ + .busy_factor = 320, \ + .imbalance_pct = 125, \ + .cache_hot_time = (10*1000000), \ + .cache_nice_tries = 1, \ + .per_cpu_gain = 100, \ + .flags = SD_LOAD_BALANCE \ + | SD_BALANCE_EXEC, \ + .last_balance = jiffies, \ + .balance_interval = 100*(63+num_online_cpus())/64, \ + .nr_balance_failed = 0, \ +} + +#endif /* CONFIG_NUMA */ + +#include <asm-generic/topology.h> + +#endif /* _ASM_IA64_TOPOLOGY_H */ diff --git a/include/asm-ia64/types.h b/include/asm-ia64/types.h new file mode 100644 index 000000000000..a677565aa954 --- /dev/null +++ b/include/asm-ia64/types.h @@ -0,0 +1,75 @@ +#ifndef _ASM_IA64_TYPES_H +#define _ASM_IA64_TYPES_H + +/* + * This file is never included by application software unless explicitly requested (e.g., + * via linux/types.h) in which case the application is Linux specific so (user-) name + * space pollution is not a major issue. However, for interoperability, libraries still + * need to be careful to avoid a name clashes. + * + * Based on <asm-alpha/types.h>. 
+ * + * Modified 1998-2000, 2002 + * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co + */ + +#ifdef __ASSEMBLY__ +# define __IA64_UL(x) (x) +# define __IA64_UL_CONST(x) x + +# ifdef __KERNEL__ +# define BITS_PER_LONG 64 +# endif + +#else +# define __IA64_UL(x) ((unsigned long)(x)) +# define __IA64_UL_CONST(x) x##UL + +typedef unsigned int umode_t; + +/* + * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the + * header files exported to user space + */ + +typedef __signed__ char __s8; +typedef unsigned char __u8; + +typedef __signed__ short __s16; +typedef unsigned short __u16; + +typedef __signed__ int __s32; +typedef unsigned int __u32; + +typedef __signed__ long __s64; +typedef unsigned long __u64; + +/* + * These aren't exported outside the kernel to avoid name space clashes + */ +# ifdef __KERNEL__ + +typedef __s8 s8; +typedef __u8 u8; + +typedef __s16 s16; +typedef __u16 u16; + +typedef __s32 s32; +typedef __u32 u32; + +typedef __s64 s64; +typedef __u64 u64; + +#define BITS_PER_LONG 64 + +/* DMA addresses are 64-bits wide, in general. */ + +typedef u64 dma_addr_t; + +typedef unsigned short kmem_bufctl_t; + +# endif /* __KERNEL__ */ +#endif /* !__ASSEMBLY__ */ + +#endif /* _ASM_IA64_TYPES_H */ diff --git a/include/asm-ia64/uaccess.h b/include/asm-ia64/uaccess.h new file mode 100644 index 000000000000..8edd9a90949c --- /dev/null +++ b/include/asm-ia64/uaccess.h @@ -0,0 +1,408 @@ +#ifndef _ASM_IA64_UACCESS_H +#define _ASM_IA64_UACCESS_H + +/* + * This file defines various macros to transfer memory areas across + * the user/kernel boundary. This needs to be done carefully because + * this code is executed in kernel mode and uses user-specified + * addresses. Thus, we need to be careful not to let the user to + * trick us into accessing kernel memory that would normally be + * inaccessible. This code is also fairly performance sensitive, + * so we want to spend as little time doing safety checks as + * possible. + * + * To make matters a bit more interesting, these macros sometimes also + * called from within the kernel itself, in which case the address + * validity check must be skipped. The get_fs() macro tells us what + * to do: if get_fs()==USER_DS, checking is performed, if + * get_fs()==KERNEL_DS, checking is bypassed. + * + * Note that even if the memory area specified by the user is in a + * valid address range, it is still possible that we'll get a page + * fault while accessing it. This is handled by filling out an + * exception handler fixup entry for each instruction that has the + * potential to fault. When such a fault occurs, the page fault + * handler checks to see whether the faulting instruction has a fixup + * associated and, if so, sets r8 to -EFAULT and clears r9 to 0 and + * then resumes execution at the continuation point. + * + * Based on <asm-alpha/uaccess.h>. + * + * Copyright (C) 1998, 1999, 2001-2004 Hewlett-Packard Co + * David Mosberger-Tang <davidm@hpl.hp.com> + */ + +#include <linux/compiler.h> +#include <linux/errno.h> +#include <linux/sched.h> +#include <linux/page-flags.h> +#include <linux/mm.h> + +#include <asm/intrinsics.h> +#include <asm/pgtable.h> +#include <asm/io.h> + +/* + * For historical reasons, the following macros are grossly misnamed: + */ +#define KERNEL_DS ((mm_segment_t) { ~0UL }) /* cf. access_ok() */ +#define USER_DS ((mm_segment_t) { TASK_SIZE-1 }) /* cf. 
access_ok() */ + +#define VERIFY_READ 0 +#define VERIFY_WRITE 1 + +#define get_ds() (KERNEL_DS) +#define get_fs() (current_thread_info()->addr_limit) +#define set_fs(x) (current_thread_info()->addr_limit = (x)) + +#define segment_eq(a, b) ((a).seg == (b).seg) + +/* + * When accessing user memory, we need to make sure the entire area really is in + * user-level space. In order to do this efficiently, we make sure that the page at + * address TASK_SIZE is never valid. We also need to make sure that the address doesn't + * point inside the virtually mapped linear page table. + */ +#define __access_ok(addr, size, segment) \ +({ \ + __chk_user_ptr(addr); \ + (likely((unsigned long) (addr) <= (segment).seg) \ + && ((segment).seg == KERNEL_DS.seg \ + || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \ +}) +#define access_ok(type, addr, size) __access_ok((addr), (size), get_fs()) + +/* this function will go away soon - use access_ok() instead */ +static inline int __deprecated +verify_area (int type, const void __user *addr, unsigned long size) +{ + return access_ok(type, addr, size) ? 0 : -EFAULT; +} + +/* + * These are the main single-value transfer routines. They automatically + * use the right size if we just have the right pointer type. + * + * Careful to not + * (a) re-use the arguments for side effects (sizeof/typeof is ok) + * (b) require any knowledge of processes at this stage + */ +#define put_user(x, ptr) __put_user_check((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)), get_fs()) +#define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr)), get_fs()) + +/* + * The "__xxx" versions do not do address space checking, useful when + * doing multiple accesses to the same area (the programmer has to do the + * checks by hand with "access_ok()") + */ +#define __put_user(x, ptr) __put_user_nocheck((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr))) +#define __get_user(x, ptr) __get_user_nocheck((x), (ptr), sizeof(*(ptr))) + +extern long __put_user_unaligned_unknown (void); + +#define __put_user_unaligned(x, ptr) \ +({ \ + long __ret; \ + switch (sizeof(*(ptr))) { \ + case 1: __ret = __put_user((x), (ptr)); break; \ + case 2: __ret = (__put_user((x), (u8 __user *)(ptr))) \ + | (__put_user((x) >> 8, ((u8 __user *)(ptr) + 1))); break; \ + case 4: __ret = (__put_user((x), (u16 __user *)(ptr))) \ + | (__put_user((x) >> 16, ((u16 __user *)(ptr) + 1))); break; \ + case 8: __ret = (__put_user((x), (u32 __user *)(ptr))) \ + | (__put_user((x) >> 32, ((u32 __user *)(ptr) + 1))); break; \ + default: __ret = __put_user_unaligned_unknown(); \ + } \ + __ret; \ +}) + +extern long __get_user_unaligned_unknown (void); + +#define __get_user_unaligned(x, ptr) \ +({ \ + long __ret; \ + switch (sizeof(*(ptr))) { \ + case 1: __ret = __get_user((x), (ptr)); break; \ + case 2: __ret = (__get_user((x), (u8 __user *)(ptr))) \ + | (__get_user((x) >> 8, ((u8 __user *)(ptr) + 1))); break; \ + case 4: __ret = (__get_user((x), (u16 __user *)(ptr))) \ + | (__get_user((x) >> 16, ((u16 __user *)(ptr) + 1))); break; \ + case 8: __ret = (__get_user((x), (u32 __user *)(ptr))) \ + | (__get_user((x) >> 32, ((u32 __user *)(ptr) + 1))); break; \ + default: __ret = __get_user_unaligned_unknown(); \ + } \ + __ret; \ +}) + +#ifdef ASM_SUPPORTED + struct __large_struct { unsigned long buf[100]; }; +# define __m(x) (*(struct __large_struct __user *)(x)) + +/* We need to declare the __ex_table section before we can use it in .xdata. 
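+   The .xdata4 directives in the accessors below each add one entry to
+   that table; on a fault, the exception handler stuffs -EFAULT into r8,
+   which becomes the return value of the access macros.  In sketch form,
+   a caller might write (uptr being a hypothetical __user pointer):
+
+	int val;
+	if (get_user(val, uptr))
+		return -EFAULT;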
*/ +asm (".section \"__ex_table\", \"a\"\n\t.previous"); + +# define __get_user_size(val, addr, n, err) \ +do { \ + register long __gu_r8 asm ("r8") = 0; \ + register long __gu_r9 asm ("r9"); \ + asm ("\n[1:]\tld"#n" %0=%2%P2\t// %0 and %1 get overwritten by exception handler\n" \ + "\t.xdata4 \"__ex_table\", 1b-., 1f-.+4\n" \ + "[1:]" \ + : "=r"(__gu_r9), "=r"(__gu_r8) : "m"(__m(addr)), "1"(__gu_r8)); \ + (err) = __gu_r8; \ + (val) = __gu_r9; \ +} while (0) + +/* + * The "__put_user_size()" macro tells gcc it reads from memory instead of writing it. This + * is because they do not write to any memory gcc knows about, so there are no aliasing + * issues. + */ +# define __put_user_size(val, addr, n, err) \ +do { \ + register long __pu_r8 asm ("r8") = 0; \ + asm volatile ("\n[1:]\tst"#n" %1=%r2%P1\t// %0 gets overwritten by exception handler\n" \ + "\t.xdata4 \"__ex_table\", 1b-., 1f-.\n" \ + "[1:]" \ + : "=r"(__pu_r8) : "m"(__m(addr)), "rO"(val), "0"(__pu_r8)); \ + (err) = __pu_r8; \ +} while (0) + +#else /* !ASM_SUPPORTED */ +# define RELOC_TYPE 2 /* ip-rel */ +# define __get_user_size(val, addr, n, err) \ +do { \ + __ld_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE); \ + (err) = ia64_getreg(_IA64_REG_R8); \ + (val) = ia64_getreg(_IA64_REG_R9); \ +} while (0) +# define __put_user_size(val, addr, n, err) \ +do { \ + __st_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE, (unsigned long) (val)); \ + (err) = ia64_getreg(_IA64_REG_R8); \ +} while (0) +#endif /* !ASM_SUPPORTED */ + +extern void __get_user_unknown (void); + +/* + * Evaluating arguments X, PTR, SIZE, and SEGMENT may involve subroutine-calls, which + * could clobber r8 and r9 (among others). Thus, be careful not to evaluate it while + * using r8/r9. + */ +#define __do_get_user(check, x, ptr, size, segment) \ +({ \ + const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \ + __typeof__ (size) __gu_size = (size); \ + long __gu_err = -EFAULT, __gu_val = 0; \ + \ + if (!check || __access_ok(__gu_ptr, size, segment)) \ + switch (__gu_size) { \ + case 1: __get_user_size(__gu_val, __gu_ptr, 1, __gu_err); break; \ + case 2: __get_user_size(__gu_val, __gu_ptr, 2, __gu_err); break; \ + case 4: __get_user_size(__gu_val, __gu_ptr, 4, __gu_err); break; \ + case 8: __get_user_size(__gu_val, __gu_ptr, 8, __gu_err); break; \ + default: __get_user_unknown(); break; \ + } \ + (x) = (__typeof__(*(__gu_ptr))) __gu_val; \ + __gu_err; \ +}) + +#define __get_user_nocheck(x, ptr, size) __do_get_user(0, x, ptr, size, KERNEL_DS) +#define __get_user_check(x, ptr, size, segment) __do_get_user(1, x, ptr, size, segment) + +extern void __put_user_unknown (void); + +/* + * Evaluating arguments X, PTR, SIZE, and SEGMENT may involve subroutine-calls, which + * could clobber r8 (among others). Thus, be careful not to evaluate them while using r8. 
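+ *
+ * (Hence the macro below first latches x, ptr, and size into the __pu_x,
+ * __pu_ptr, and __pu_size locals before any __put_user_size() expansion
+ * touches r8.)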
+ */ +#define __do_put_user(check, x, ptr, size, segment) \ +({ \ + __typeof__ (x) __pu_x = (x); \ + __typeof__ (*(ptr)) __user *__pu_ptr = (ptr); \ + __typeof__ (size) __pu_size = (size); \ + long __pu_err = -EFAULT; \ + \ + if (!check || __access_ok(__pu_ptr, __pu_size, segment)) \ + switch (__pu_size) { \ + case 1: __put_user_size(__pu_x, __pu_ptr, 1, __pu_err); break; \ + case 2: __put_user_size(__pu_x, __pu_ptr, 2, __pu_err); break; \ + case 4: __put_user_size(__pu_x, __pu_ptr, 4, __pu_err); break; \ + case 8: __put_user_size(__pu_x, __pu_ptr, 8, __pu_err); break; \ + default: __put_user_unknown(); break; \ + } \ + __pu_err; \ +}) + +#define __put_user_nocheck(x, ptr, size) __do_put_user(0, x, ptr, size, KERNEL_DS) +#define __put_user_check(x, ptr, size, segment) __do_put_user(1, x, ptr, size, segment) + +/* + * Complex access routines + */ +extern unsigned long __must_check __copy_user (void __user *to, const void __user *from, + unsigned long count); + +static inline unsigned long +__copy_to_user (void __user *to, const void *from, unsigned long count) +{ + return __copy_user(to, (void __user *) from, count); +} + +static inline unsigned long +__copy_from_user (void *to, const void __user *from, unsigned long count) +{ + return __copy_user((void __user *) to, from, count); +} + +#define __copy_to_user_inatomic __copy_to_user +#define __copy_from_user_inatomic __copy_from_user +#define copy_to_user(to, from, n) \ +({ \ + void __user *__cu_to = (to); \ + const void *__cu_from = (from); \ + long __cu_len = (n); \ + \ + if (__access_ok(__cu_to, __cu_len, get_fs())) \ + __cu_len = __copy_user(__cu_to, (void __user *) __cu_from, __cu_len); \ + __cu_len; \ +}) + +#define copy_from_user(to, from, n) \ +({ \ + void *__cu_to = (to); \ + const void __user *__cu_from = (from); \ + long __cu_len = (n); \ + \ + __chk_user_ptr(__cu_from); \ + if (__access_ok(__cu_from, __cu_len, get_fs())) \ + __cu_len = __copy_user((void __user *) __cu_to, __cu_from, __cu_len); \ + __cu_len; \ +}) + +#define __copy_in_user(to, from, size) __copy_user((to), (from), (size)) + +static inline unsigned long +copy_in_user (void __user *to, const void __user *from, unsigned long n) +{ + if (likely(access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n))) + n = __copy_user(to, from, n); + return n; +} + +extern unsigned long __do_clear_user (void __user *, unsigned long); + +#define __clear_user(to, n) __do_clear_user(to, n) + +#define clear_user(to, n) \ +({ \ + unsigned long __cu_len = (n); \ + if (__access_ok(to, __cu_len, get_fs())) \ + __cu_len = __do_clear_user(to, __cu_len); \ + __cu_len; \ +}) + + +/* + * Returns: -EFAULT if exception before terminator, N if the entire buffer filled, else + * strlen. 
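 * (Stepping back for a moment — an editor's sketch, not part of this header —
 * note that the bulk-copy routines above follow a different convention from
 * the string routines documented here: they return the number of bytes that
 * could NOT be copied, so zero means complete success. For example:
 *
 *	static long mirror (void __user *dst, const void __user *src,
 *			    void *tmp, unsigned long len)
 *	{
 *		if (copy_from_user(tmp, src, len))
 *			return -EFAULT;		// partial copy: treat as fault
 *		if (copy_to_user(dst, tmp, len))
 *			return -EFAULT;
 *		return 0;
 *	}
 *
 * For a direct user-to-user move, copy_in_user() above does the same job
 * without the bounce buffer.)
 *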
+ */ +extern long __must_check __strncpy_from_user (char *to, const char __user *from, long to_len); + +#define strncpy_from_user(to, from, n) \ +({ \ + const char __user * __sfu_from = (from); \ + long __sfu_ret = -EFAULT; \ + if (__access_ok(__sfu_from, 0, get_fs())) \ + __sfu_ret = __strncpy_from_user((to), __sfu_from, (n)); \ + __sfu_ret; \ +}) + +/* Returns: 0 if bad, string length+1 (memory size) of string if ok */ +extern unsigned long __strlen_user (const char __user *); + +#define strlen_user(str) \ +({ \ + const char __user *__su_str = (str); \ + unsigned long __su_ret = 0; \ + if (__access_ok(__su_str, 0, get_fs())) \ + __su_ret = __strlen_user(__su_str); \ + __su_ret; \ +}) + +/* + * Returns: 0 if exception before NUL or reaching the supplied limit + * (N), a value greater than N if the limit would be exceeded, else + * strlen. + */ +extern unsigned long __strnlen_user (const char __user *, long); + +#define strnlen_user(str, len) \ +({ \ + const char __user *__su_str = (str); \ + unsigned long __su_ret = 0; \ + if (__access_ok(__su_str, 0, get_fs())) \ + __su_ret = __strnlen_user(__su_str, len); \ + __su_ret; \ +}) + +/* Generic code can't deal with the location-relative format that we use for compactness. */ +#define ARCH_HAS_SORT_EXTABLE +#define ARCH_HAS_SEARCH_EXTABLE + +struct exception_table_entry { + int addr; /* location-relative address of insn this fixup is for */ + int cont; /* location-relative continuation addr.; if bit 2 is set, r9 is set to 0 */ +}; + +extern void ia64_handle_exception (struct pt_regs *regs, const struct exception_table_entry *e); +extern const struct exception_table_entry *search_exception_tables (unsigned long addr); + +static inline int +ia64_done_with_exception (struct pt_regs *regs) +{ + const struct exception_table_entry *e; + e = search_exception_tables(regs->cr_iip + ia64_psr(regs)->ri); + if (e) { + ia64_handle_exception(regs, e); + return 1; + } + return 0; +} + +#define ARCH_HAS_TRANSLATE_MEM_PTR 1 +static __inline__ char * +xlate_dev_mem_ptr (unsigned long p) +{ + struct page *page; + char * ptr; + + page = pfn_to_page(p >> PAGE_SHIFT); + if (PageUncached(page)) + ptr = (char *)p + __IA64_UNCACHED_OFFSET; + else + ptr = __va(p); + + return ptr; +} + +/* + * Convert a virtual cached kernel memory pointer to an uncached pointer + */ +static __inline__ char * +xlate_dev_kmem_ptr (char * p) +{ + struct page *page; + char * ptr; + + page = virt_to_page((unsigned long)p >> PAGE_SHIFT); + if (PageUncached(page)) + ptr = (char *)__pa(p) + __IA64_UNCACHED_OFFSET; + else + ptr = p; + + return ptr; +} + +#endif /* _ASM_IA64_UACCESS_H */ diff --git a/include/asm-ia64/ucontext.h b/include/asm-ia64/ucontext.h new file mode 100644 index 000000000000..bf573dc8ca6a --- /dev/null +++ b/include/asm-ia64/ucontext.h @@ -0,0 +1,12 @@ +#ifndef _ASM_IA64_UCONTEXT_H +#define _ASM_IA64_UCONTEXT_H + +struct ucontext { + struct sigcontext uc_mcontext; +}; + +#define uc_link uc_mcontext.sc_gr[0] /* wrong type; nobody cares */ +#define uc_sigmask uc_mcontext.sc_sigmask +#define uc_stack uc_mcontext.sc_stack + +#endif /* _ASM_IA64_UCONTEXT_H */ diff --git a/include/asm-ia64/unaligned.h b/include/asm-ia64/unaligned.h new file mode 100644 index 000000000000..bb8559888103 --- /dev/null +++ b/include/asm-ia64/unaligned.h @@ -0,0 +1,6 @@ +#ifndef _ASM_IA64_UNALIGNED_H +#define _ASM_IA64_UNALIGNED_H + +#include <asm-generic/unaligned.h> + +#endif /* _ASM_IA64_UNALIGNED_H */ diff --git a/include/asm-ia64/unistd.h b/include/asm-ia64/unistd.h new file mode 100644 index 
000000000000..33e26c557c5c --- /dev/null +++ b/include/asm-ia64/unistd.h @@ -0,0 +1,399 @@ +#ifndef _ASM_IA64_UNISTD_H +#define _ASM_IA64_UNISTD_H + +/* + * IA-64 Linux syscall numbers and inline-functions. + * + * Copyright (C) 1998-2005 Hewlett-Packard Co + * David Mosberger-Tang <davidm@hpl.hp.com> + */ + +#include <asm/break.h> + +#define __BREAK_SYSCALL __IA64_BREAK_SYSCALL + +#define __NR_ni_syscall 1024 +#define __NR_exit 1025 +#define __NR_read 1026 +#define __NR_write 1027 +#define __NR_open 1028 +#define __NR_close 1029 +#define __NR_creat 1030 +#define __NR_link 1031 +#define __NR_unlink 1032 +#define __NR_execve 1033 +#define __NR_chdir 1034 +#define __NR_fchdir 1035 +#define __NR_utimes 1036 +#define __NR_mknod 1037 +#define __NR_chmod 1038 +#define __NR_chown 1039 +#define __NR_lseek 1040 +#define __NR_getpid 1041 +#define __NR_getppid 1042 +#define __NR_mount 1043 +#define __NR_umount 1044 +#define __NR_setuid 1045 +#define __NR_getuid 1046 +#define __NR_geteuid 1047 +#define __NR_ptrace 1048 +#define __NR_access 1049 +#define __NR_sync 1050 +#define __NR_fsync 1051 +#define __NR_fdatasync 1052 +#define __NR_kill 1053 +#define __NR_rename 1054 +#define __NR_mkdir 1055 +#define __NR_rmdir 1056 +#define __NR_dup 1057 +#define __NR_pipe 1058 +#define __NR_times 1059 +#define __NR_brk 1060 +#define __NR_setgid 1061 +#define __NR_getgid 1062 +#define __NR_getegid 1063 +#define __NR_acct 1064 +#define __NR_ioctl 1065 +#define __NR_fcntl 1066 +#define __NR_umask 1067 +#define __NR_chroot 1068 +#define __NR_ustat 1069 +#define __NR_dup2 1070 +#define __NR_setreuid 1071 +#define __NR_setregid 1072 +#define __NR_getresuid 1073 +#define __NR_setresuid 1074 +#define __NR_getresgid 1075 +#define __NR_setresgid 1076 +#define __NR_getgroups 1077 +#define __NR_setgroups 1078 +#define __NR_getpgid 1079 +#define __NR_setpgid 1080 +#define __NR_setsid 1081 +#define __NR_getsid 1082 +#define __NR_sethostname 1083 +#define __NR_setrlimit 1084 +#define __NR_getrlimit 1085 +#define __NR_getrusage 1086 +#define __NR_gettimeofday 1087 +#define __NR_settimeofday 1088 +#define __NR_select 1089 +#define __NR_poll 1090 +#define __NR_symlink 1091 +#define __NR_readlink 1092 +#define __NR_uselib 1093 +#define __NR_swapon 1094 +#define __NR_swapoff 1095 +#define __NR_reboot 1096 +#define __NR_truncate 1097 +#define __NR_ftruncate 1098 +#define __NR_fchmod 1099 +#define __NR_fchown 1100 +#define __NR_getpriority 1101 +#define __NR_setpriority 1102 +#define __NR_statfs 1103 +#define __NR_fstatfs 1104 +#define __NR_gettid 1105 +#define __NR_semget 1106 +#define __NR_semop 1107 +#define __NR_semctl 1108 +#define __NR_msgget 1109 +#define __NR_msgsnd 1110 +#define __NR_msgrcv 1111 +#define __NR_msgctl 1112 +#define __NR_shmget 1113 +#define __NR_shmat 1114 +#define __NR_shmdt 1115 +#define __NR_shmctl 1116 +/* also known as klogctl() in GNU libc: */ +#define __NR_syslog 1117 +#define __NR_setitimer 1118 +#define __NR_getitimer 1119 +/* 1120 was __NR_old_stat */ +/* 1121 was __NR_old_lstat */ +/* 1122 was __NR_old_fstat */ +#define __NR_vhangup 1123 +#define __NR_lchown 1124 +#define __NR_remap_file_pages 1125 +#define __NR_wait4 1126 +#define __NR_sysinfo 1127 +#define __NR_clone 1128 +#define __NR_setdomainname 1129 +#define __NR_uname 1130 +#define __NR_adjtimex 1131 +/* 1132 was __NR_create_module */ +#define __NR_init_module 1133 +#define __NR_delete_module 1134 +/* 1135 was __NR_get_kernel_syms */ +/* 1136 was __NR_query_module */ +#define __NR_quotactl 1137 +#define __NR_bdflush 1138 +#define __NR_sysfs 
1139 +#define __NR_personality 1140 +#define __NR_afs_syscall 1141 +#define __NR_setfsuid 1142 +#define __NR_setfsgid 1143 +#define __NR_getdents 1144 +#define __NR_flock 1145 +#define __NR_readv 1146 +#define __NR_writev 1147 +#define __NR_pread64 1148 +#define __NR_pwrite64 1149 +#define __NR__sysctl 1150 +#define __NR_mmap 1151 +#define __NR_munmap 1152 +#define __NR_mlock 1153 +#define __NR_mlockall 1154 +#define __NR_mprotect 1155 +#define __NR_mremap 1156 +#define __NR_msync 1157 +#define __NR_munlock 1158 +#define __NR_munlockall 1159 +#define __NR_sched_getparam 1160 +#define __NR_sched_setparam 1161 +#define __NR_sched_getscheduler 1162 +#define __NR_sched_setscheduler 1163 +#define __NR_sched_yield 1164 +#define __NR_sched_get_priority_max 1165 +#define __NR_sched_get_priority_min 1166 +#define __NR_sched_rr_get_interval 1167 +#define __NR_nanosleep 1168 +#define __NR_nfsservctl 1169 +#define __NR_prctl 1170 +/* 1171 is reserved for backwards compatibility with old __NR_getpagesize */ +#define __NR_mmap2 1172 +#define __NR_pciconfig_read 1173 +#define __NR_pciconfig_write 1174 +#define __NR_perfmonctl 1175 +#define __NR_sigaltstack 1176 +#define __NR_rt_sigaction 1177 +#define __NR_rt_sigpending 1178 +#define __NR_rt_sigprocmask 1179 +#define __NR_rt_sigqueueinfo 1180 +#define __NR_rt_sigreturn 1181 +#define __NR_rt_sigsuspend 1182 +#define __NR_rt_sigtimedwait 1183 +#define __NR_getcwd 1184 +#define __NR_capget 1185 +#define __NR_capset 1186 +#define __NR_sendfile 1187 +#define __NR_getpmsg 1188 +#define __NR_putpmsg 1189 +#define __NR_socket 1190 +#define __NR_bind 1191 +#define __NR_connect 1192 +#define __NR_listen 1193 +#define __NR_accept 1194 +#define __NR_getsockname 1195 +#define __NR_getpeername 1196 +#define __NR_socketpair 1197 +#define __NR_send 1198 +#define __NR_sendto 1199 +#define __NR_recv 1200 +#define __NR_recvfrom 1201 +#define __NR_shutdown 1202 +#define __NR_setsockopt 1203 +#define __NR_getsockopt 1204 +#define __NR_sendmsg 1205 +#define __NR_recvmsg 1206 +#define __NR_pivot_root 1207 +#define __NR_mincore 1208 +#define __NR_madvise 1209 +#define __NR_stat 1210 +#define __NR_lstat 1211 +#define __NR_fstat 1212 +#define __NR_clone2 1213 +#define __NR_getdents64 1214 +#define __NR_getunwind 1215 +#define __NR_readahead 1216 +#define __NR_setxattr 1217 +#define __NR_lsetxattr 1218 +#define __NR_fsetxattr 1219 +#define __NR_getxattr 1220 +#define __NR_lgetxattr 1221 +#define __NR_fgetxattr 1222 +#define __NR_listxattr 1223 +#define __NR_llistxattr 1224 +#define __NR_flistxattr 1225 +#define __NR_removexattr 1226 +#define __NR_lremovexattr 1227 +#define __NR_fremovexattr 1228 +#define __NR_tkill 1229 +#define __NR_futex 1230 +#define __NR_sched_setaffinity 1231 +#define __NR_sched_getaffinity 1232 +#define __NR_set_tid_address 1233 +#define __NR_fadvise64 1234 +#define __NR_tgkill 1235 +#define __NR_exit_group 1236 +#define __NR_lookup_dcookie 1237 +#define __NR_io_setup 1238 +#define __NR_io_destroy 1239 +#define __NR_io_getevents 1240 +#define __NR_io_submit 1241 +#define __NR_io_cancel 1242 +#define __NR_epoll_create 1243 +#define __NR_epoll_ctl 1244 +#define __NR_epoll_wait 1245 +#define __NR_restart_syscall 1246 +#define __NR_semtimedop 1247 +#define __NR_timer_create 1248 +#define __NR_timer_settime 1249 +#define __NR_timer_gettime 1250 +#define __NR_timer_getoverrun 1251 +#define __NR_timer_delete 1252 +#define __NR_clock_settime 1253 +#define __NR_clock_gettime 1254 +#define __NR_clock_getres 1255 +#define __NR_clock_nanosleep 1256 +#define 
__NR_fstatfs64 1257 +#define __NR_statfs64 1258 +#define __NR_mbind 1259 +#define __NR_get_mempolicy 1260 +#define __NR_set_mempolicy 1261 +#define __NR_mq_open 1262 +#define __NR_mq_unlink 1263 +#define __NR_mq_timedsend 1264 +#define __NR_mq_timedreceive 1265 +#define __NR_mq_notify 1266 +#define __NR_mq_getsetattr 1267 +#define __NR_kexec_load 1268 +#define __NR_vserver 1269 +#define __NR_waitid 1270 +#define __NR_add_key 1271 +#define __NR_request_key 1272 +#define __NR_keyctl 1273 + +#ifdef __KERNEL__ + +#include <linux/config.h> + +#define NR_syscalls 256 /* length of syscall table */ + +#define __ARCH_WANT_SYS_RT_SIGACTION + +#ifdef CONFIG_IA32_SUPPORT +# define __ARCH_WANT_SYS_FADVISE64 +# define __ARCH_WANT_SYS_GETPGRP +# define __ARCH_WANT_SYS_LLSEEK +# define __ARCH_WANT_SYS_NICE +# define __ARCH_WANT_SYS_OLD_GETRLIMIT +# define __ARCH_WANT_SYS_OLDUMOUNT +# define __ARCH_WANT_SYS_SIGPENDING +# define __ARCH_WANT_SYS_SIGPROCMASK +# define __ARCH_WANT_COMPAT_SYS_TIME +#endif + +#if !defined(__ASSEMBLY__) && !defined(ASSEMBLER) + +#include <linux/types.h> +#include <linux/linkage.h> +#include <linux/compiler.h> + +extern long __ia64_syscall (long a0, long a1, long a2, long a3, long a4, long nr); + +#ifdef __KERNEL_SYSCALLS__ + +#include <linux/compiler.h> +#include <linux/string.h> +#include <linux/signal.h> +#include <asm/ptrace.h> +#include <linux/stringify.h> +#include <linux/syscalls.h> + +static inline long +open (const char * name, int mode, int flags) +{ + return sys_open(name, mode, flags); +} + +static inline long +dup (int fd) +{ + return sys_dup(fd); +} + +static inline long +close (int fd) +{ + return sys_close(fd); +} + +static inline off_t +lseek (int fd, off_t off, int whence) +{ + return sys_lseek(fd, off, whence); +} + +static inline void +_exit (int value) +{ + sys_exit(value); +} + +#define exit(x) _exit(x) + +static inline long +write (int fd, const char * buf, size_t nr) +{ + return sys_write(fd, buf, nr); +} + +static inline long +read (int fd, char * buf, size_t nr) +{ + return sys_read(fd, buf, nr); +} + + +static inline long +setsid (void) +{ + return sys_setsid(); +} + +static inline pid_t +waitpid (int pid, int * wait_stat, int flags) +{ + return sys_wait4(pid, wait_stat, flags, NULL); +} + + +extern int execve (const char *filename, char *const av[], char *const ep[]); +extern pid_t clone (unsigned long flags, void *sp); + +#endif /* __KERNEL_SYSCALLS__ */ + +asmlinkage unsigned long sys_mmap( + unsigned long addr, unsigned long len, + int prot, int flags, + int fd, long off); +asmlinkage unsigned long sys_mmap2( + unsigned long addr, unsigned long len, + int prot, int flags, + int fd, long pgoff); +struct pt_regs; +struct sigaction; +long sys_execve(char __user *filename, char __user * __user *argv, + char __user * __user *envp, struct pt_regs *regs); +asmlinkage long sys_pipe(void); +asmlinkage long sys_ptrace(long request, pid_t pid, + unsigned long addr, unsigned long data); +asmlinkage long sys_rt_sigaction(int sig, + const struct sigaction __user *act, + struct sigaction __user *oact, + size_t sigsetsize); + +/* + * "Conditional" syscalls + * + * Note, this macro can only be used in the file which defines sys_ni_syscall, i.e., in + * kernel/sys_ni.c. This version causes warnings because the declaration isn't a + * proper prototype, but we can't use __typeof__ either, because not all cond_syscall() + * declarations have prototypes at the moment. 
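 * For instance (an illustrative sketch), kernel/sys_ni.c contains lines such as
 *
 *	cond_syscall(sys_quotactl);
 *
 * which expand to a weak alias for sys_ni_syscall(), so the syscall slot
 * falls back to returning -ENOSYS whenever the real implementation is
 * compiled out.
 *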
+ */ +#define cond_syscall(x) asmlinkage long x (void) __attribute__((weak,alias("sys_ni_syscall"))) + +#endif /* !__ASSEMBLY__ */ +#endif /* __KERNEL__ */ +#endif /* _ASM_IA64_UNISTD_H */ diff --git a/include/asm-ia64/unwind.h b/include/asm-ia64/unwind.h new file mode 100644 index 000000000000..61426ad3ecdb --- /dev/null +++ b/include/asm-ia64/unwind.h @@ -0,0 +1,240 @@ +#ifndef _ASM_IA64_UNWIND_H +#define _ASM_IA64_UNWIND_H + +/* + * Copyright (C) 1999-2000, 2003 Hewlett-Packard Co + * David Mosberger-Tang <davidm@hpl.hp.com> + * + * A simple API for unwinding kernel stacks. This is used for + * debugging and error reporting purposes. The kernel doesn't need + * full-blown stack unwinding with all the bells and whistles, so there + * is not much point in implementing the full IA-64 unwind API (though + * it would of course be possible to implement the kernel API on top + * of it). + */ + +struct task_struct; /* forward declaration */ +struct switch_stack; /* forward declaration */ + +enum unw_application_register { + UNW_AR_BSP, + UNW_AR_BSPSTORE, + UNW_AR_PFS, + UNW_AR_RNAT, + UNW_AR_UNAT, + UNW_AR_LC, + UNW_AR_EC, + UNW_AR_FPSR, + UNW_AR_RSC, + UNW_AR_CCV, + UNW_AR_CSD, + UNW_AR_SSD +}; + +/* + * The following declarations are private to the unwind + * implementation: + */ + +struct unw_stack { + unsigned long limit; + unsigned long top; +}; + +#define UNW_FLAG_INTERRUPT_FRAME (1UL << 0) + +/* + * No user of this module should ever access this structure directly + * as it is subject to change. It is declared here solely so we can + * use automatic variables. + */ +struct unw_frame_info { + struct unw_stack regstk; + struct unw_stack memstk; + unsigned int flags; + short hint; + short prev_script; + + /* current frame info: */ + unsigned long bsp; /* backing store pointer value */ + unsigned long sp; /* stack pointer value */ + unsigned long psp; /* previous sp value */ + unsigned long ip; /* instruction pointer value */ + unsigned long pr; /* current predicate values */ + unsigned long *cfm_loc; /* cfm save location (or NULL) */ + unsigned long pt; /* struct pt_regs location */ + + struct task_struct *task; + struct switch_stack *sw; + + /* preserved state: */ + unsigned long *bsp_loc; /* previous bsp save location */ + unsigned long *bspstore_loc; + unsigned long *pfs_loc; + unsigned long *rnat_loc; + unsigned long *rp_loc; + unsigned long *pri_unat_loc; + unsigned long *unat_loc; + unsigned long *pr_loc; + unsigned long *lc_loc; + unsigned long *fpsr_loc; + struct unw_ireg { + unsigned long *loc; + struct unw_ireg_nat { + long type : 3; /* enum unw_nat_type */ + signed long off : 61; /* NaT word is at loc+nat.off */ + } nat; + } r4, r5, r6, r7; + unsigned long *b1_loc, *b2_loc, *b3_loc, *b4_loc, *b5_loc; + struct ia64_fpreg *f2_loc, *f3_loc, *f4_loc, *f5_loc, *fr_loc[16]; +}; + +/* + * The official API follows below: + */ + +struct unw_table_entry { + u64 start_offset; + u64 end_offset; + u64 info_offset; +}; + +/* + * Initialize unwind support. + */ +extern void unw_init (void); + +extern void *unw_add_unwind_table (const char *name, unsigned long segment_base, unsigned long gp, + const void *table_start, const void *table_end); + +extern void unw_remove_unwind_table (void *handle); + +/* + * Prepare to unwind blocked task t. + */ +extern void unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t); +
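A usage sketch (not part of the original header; unw_init_running(), unw_unwind() and unw_get_ip() are the API declared in the remainder of this file) — walking the current kernel stack and printing every instruction pointer:

	/* callback handed to unw_init_running(); info describes the newest frame */
	static void dump_backtrace (struct unw_frame_info *info, void *arg)
	{
		unsigned long ip;

		do {
			unw_get_ip(info, &ip);
			if (ip == 0)
				break;
			printk("  [<%016lx>]\n", ip);
		} while (unw_unwind(info) >= 0);	/* 0 on success, negative on error */
	}

	/* in some debugging path: */
	unw_init_running(dump_backtrace, NULL);
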
+/* + * Prepare to unwind from interruption. The pt-regs and switch-stack structures must + * be "adjacent" (no state modifications between pt-regs and switch-stack). + */ +extern void unw_init_from_interruption (struct unw_frame_info *info, struct task_struct *t, + struct pt_regs *pt, struct switch_stack *sw); + +extern void unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, + struct switch_stack *sw); + +/* + * Prepare to unwind the currently running thread. + */ +extern void unw_init_running (void (*callback)(struct unw_frame_info *info, void *arg), void *arg); + +/* + * Unwind to the previous frame. Returns 0 if successful, negative + * number in case of an error. + */ +extern int unw_unwind (struct unw_frame_info *info); + +/* + * Unwind until the return pointer is in user-land (or until an error + * occurs). Returns 0 if successful, negative number in case of + * error. + */ +extern int unw_unwind_to_user (struct unw_frame_info *info); + +#define unw_is_intr_frame(info) (((info)->flags & UNW_FLAG_INTERRUPT_FRAME) != 0) + +static inline int +unw_get_ip (struct unw_frame_info *info, unsigned long *valp) +{ + *valp = (info)->ip; + return 0; +} + +static inline int +unw_get_sp (struct unw_frame_info *info, unsigned long *valp) +{ + *valp = (info)->sp; + return 0; +} + +static inline int +unw_get_psp (struct unw_frame_info *info, unsigned long *valp) +{ + *valp = (info)->psp; + return 0; +} + +static inline int +unw_get_bsp (struct unw_frame_info *info, unsigned long *valp) +{ + *valp = (info)->bsp; + return 0; +} + +static inline int +unw_get_cfm (struct unw_frame_info *info, unsigned long *valp) +{ + *valp = *(info)->cfm_loc; + return 0; +} + +static inline int +unw_set_cfm (struct unw_frame_info *info, unsigned long val) +{ + *(info)->cfm_loc = val; + return 0; +} + +static inline int +unw_get_rp (struct unw_frame_info *info, unsigned long *val) +{ + if (!info->rp_loc) + return -1; + *val = *info->rp_loc; + return 0; +} + +extern int unw_access_gr (struct unw_frame_info *, int, unsigned long *, char *, int); +extern int unw_access_br (struct unw_frame_info *, int, unsigned long *, int); +extern int unw_access_fr (struct unw_frame_info *, int, struct ia64_fpreg *, int); +extern int unw_access_ar (struct unw_frame_info *, int, unsigned long *, int); +extern int unw_access_pr (struct unw_frame_info *, unsigned long *, int); + +static inline int +unw_set_gr (struct unw_frame_info *i, int n, unsigned long v, char nat) +{ + return unw_access_gr(i, n, &v, &nat, 1); +} + +static inline int +unw_set_br (struct unw_frame_info *i, int n, unsigned long v) +{ + return unw_access_br(i, n, &v, 1); +} + +static inline int +unw_set_fr (struct unw_frame_info *i, int n, struct ia64_fpreg v) +{ + return unw_access_fr(i, n, &v, 1); +} + +static inline int +unw_set_ar (struct unw_frame_info *i, int n, unsigned long v) +{ + return unw_access_ar(i, n, &v, 1); +} + +static inline int +unw_set_pr (struct unw_frame_info *i, unsigned long v) +{ + return unw_access_pr(i, &v, 1); +} + +#define unw_get_gr(i,n,v,nat) unw_access_gr(i,n,v,nat,0) +#define unw_get_br(i,n,v) unw_access_br(i,n,v,0) +#define unw_get_fr(i,n,v) unw_access_fr(i,n,v,0) +#define unw_get_ar(i,n,v) unw_access_ar(i,n,v,0) +#define unw_get_pr(i,v) unw_access_pr(i,v,0) + +#endif /* _ASM_IA64_UNWIND_H */ diff --git a/include/asm-ia64/user.h b/include/asm-ia64/user.h new file mode 100644 index 000000000000..78e5a20140aa --- /dev/null +++ b/include/asm-ia64/user.h @@ -0,0 +1,58 @@ +#ifndef _ASM_IA64_USER_H +#define _ASM_IA64_USER_H + +/* + * Core file format: The core file is
written in such a way that gdb + * can understand it and provide useful information to the user (under + * linux we use the `trad-core' bfd). The file contents are as + * follows: + * + * upage: 1 page consisting of a user struct that tells gdb + * what is present in the file. Directly after this is a + * copy of the task_struct, which is currently not used by gdb, + * but it may come in handy at some point. All of the registers + * are stored as part of the upage. The upage should always be + * only one page long. + * data: The data segment follows next. We use current->end_text to + * current->brk to pick up all of the user variables, plus any memory + * that may have been sbrk'ed. No attempt is made to determine if a + * page is demand-zero or if a page is totally unused, we just cover + * the entire range. All of the addresses are rounded in such a way + * that an integral number of pages is written. + * stack: We need the stack information in order to get a meaningful + * backtrace. We need to write the data from usp to + * current->start_stack, so we round each of these in order to be able + * to write an integer number of pages. + * + * Modified 1998, 1999, 2001 + * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co + */ + +#include <linux/ptrace.h> +#include <linux/types.h> + +#include <asm/page.h> + +#define EF_SIZE 3072 /* XXX fix me */ + +struct user { + unsigned long regs[EF_SIZE/8+32]; /* integer and fp regs */ + size_t u_tsize; /* text size (pages) */ + size_t u_dsize; /* data size (pages) */ + size_t u_ssize; /* stack size (pages) */ + unsigned long start_code; /* text starting address */ + unsigned long start_data; /* data starting address */ + unsigned long start_stack; /* stack starting address */ + long int signal; /* signal causing core dump */ + struct regs * u_ar0; /* help gdb find registers */ + unsigned long magic; /* identifies a core file */ + char u_comm[32]; /* user command name */ +}; + +#define NBPG PAGE_SIZE +#define UPAGES 1 +#define HOST_TEXT_START_ADDR (u.start_code) +#define HOST_DATA_START_ADDR (u.start_data) +#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG) + +#endif /* _ASM_IA64_USER_H */ diff --git a/include/asm-ia64/ustack.h b/include/asm-ia64/ustack.h new file mode 100644 index 000000000000..da55c91246e3 --- /dev/null +++ b/include/asm-ia64/ustack.h @@ -0,0 +1,16 @@ +#ifndef _ASM_IA64_USTACK_H +#define _ASM_IA64_USTACK_H + +/* + * Constants for the user stack size + */ + +#include <asm/page.h> + +/* The absolute hard limit for stack size is 1/2 of the mappable space in the region */ +#define MAX_USER_STACK_SIZE (RGN_MAP_LIMIT/2) +/* Make a default stack size of 2GB */ +#define DEFAULT_USER_STACK_SIZE (1UL << 31) +#define STACK_TOP (0x6000000000000000UL + RGN_MAP_LIMIT) + +#endif /* _ASM_IA64_USTACK_H */ diff --git a/include/asm-ia64/vga.h b/include/asm-ia64/vga.h new file mode 100644 index 000000000000..1f446d6841f6 --- /dev/null +++ b/include/asm-ia64/vga.h @@ -0,0 +1,22 @@ +/* + * Access to VGA videoram + * + * (c) 1998 Martin Mares <mj@ucw.cz> + * (c) 1999 Asit Mallick <asit.k.mallick@intel.com> + * (c) 1999 Don Dugger <don.dugger@intel.com> + */ + +#ifndef __ASM_IA64_VGA_H_ +#define __ASM_IA64_VGA_H_ + +/* + * On the PC, we can just recalculate addresses and then access the + * videoram directly without any black magic. 
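 * (Illustrative aside, not part of this header: a minimal text-mode poke
 * through the macros defined below would look like
 *
 *	char *fb = (char *) VGA_MAP_MEM(0xb8000);	// legacy text buffer
 *	vga_writeb('X', fb);				// character byte of the top-left cell
 *
 * i.e. on ia64 the legacy VGA window must first be mapped via ioremap(),
 * which is exactly what VGA_MAP_MEM() wraps.)
 *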
+ */ + +#define VGA_MAP_MEM(x) ((unsigned long) ioremap((x), 0)) + +#define vga_readb(x) (*(x)) +#define vga_writeb(x,y) (*(y) = (x)) + +#endif /* __ASM_IA64_VGA_H_ */ diff --git a/include/asm-ia64/xor.h b/include/asm-ia64/xor.h new file mode 100644 index 000000000000..41fb8744d17a --- /dev/null +++ b/include/asm-ia64/xor.h @@ -0,0 +1,33 @@ +/* + * include/asm-ia64/xor.h + * + * Optimized RAID-5 checksumming functions for IA-64. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * You should have received a copy of the GNU General Public License + * (for example /usr/src/linux/COPYING); if not, write to the Free + * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + + +extern void xor_ia64_2(unsigned long, unsigned long *, unsigned long *); +extern void xor_ia64_3(unsigned long, unsigned long *, unsigned long *, + unsigned long *); +extern void xor_ia64_4(unsigned long, unsigned long *, unsigned long *, + unsigned long *, unsigned long *); +extern void xor_ia64_5(unsigned long, unsigned long *, unsigned long *, + unsigned long *, unsigned long *, unsigned long *); + +static struct xor_block_template xor_block_ia64 = { + .name = "ia64", + .do_2 = xor_ia64_2, + .do_3 = xor_ia64_3, + .do_4 = xor_ia64_4, + .do_5 = xor_ia64_5, +}; + +#define XOR_TRY_TEMPLATES xor_speed(&xor_block_ia64)
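To make the calling convention concrete — an editor's reference sketch, not the hand-tuned assembly the declarations above actually bind to — the two-source case computes destination ^= source, one machine word at a time:

	static void xor_2_reference (unsigned long bytes, unsigned long *p1,
				     unsigned long *p2)
	{
		unsigned long i, words = bytes / sizeof(unsigned long);

		for (i = 0; i < words; i++)
			p1[i] ^= p2[i];		/* p1 is both input and destination */
	}

At boot, the RAID code runs xor_speed() on every registered template and keeps the fastest one; XOR_TRY_TEMPLATES simply enrolls the ia64 routines in that benchmark.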