Diffstat (limited to 'arch/parisc/kernel')
25 files changed, 758 insertions, 1159 deletions
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c index 1ad44f92d6e4..e23c4e1e3a25 100644 --- a/arch/parisc/kernel/asm-offsets.c +++ b/arch/parisc/kernel/asm-offsets.c @@ -30,7 +30,6 @@ #include <linux/types.h> #include <linux/sched.h> #include <linux/thread_info.h> -#include <linux/version.h> #include <linux/ptrace.h> #include <linux/hardirq.h> diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index f46a07a79218..a065349aee37 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -27,6 +27,7 @@ #include <asm/page.h> #include <asm/pgalloc.h> #include <asm/processor.h> +#include <asm/sections.h> int split_tlb; int dcache_stride; @@ -207,6 +208,9 @@ parisc_cache_init(void) /* "New and Improved" version from Jim Hull * (1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift)) + * The following CAFL_STRIDE is an optimized version, see + * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html + * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html */ #define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift)) dcache_stride = CAFL_STRIDE(cache_info.dc_conf); @@ -266,7 +270,6 @@ void flush_dcache_page(struct page *page) unsigned long offset; unsigned long addr; pgoff_t pgoff; - pte_t *pte; unsigned long pfn = page_to_pfn(page); @@ -297,21 +300,16 @@ void flush_dcache_page(struct page *page) * taking a page fault if the pte doesn't exist. * This is just for speed. If the page translation * isn't there, there's no point exciting the - * nadtlb handler into a nullification frenzy */ - - - if(!(pte = translation_exists(mpnt, addr))) - continue; - - /* make sure we really have this page: the private + * nadtlb handler into a nullification frenzy. + * + * Make sure we really have this page: the private * mappings may cover this area but have COW'd this - * particular page */ - if(pte_pfn(*pte) != pfn) - continue; - - __flush_cache_page(mpnt, addr); - - break; + * particular page. 
+ */ + if (translation_exists(mpnt, addr, pfn)) { + __flush_cache_page(mpnt, addr); + break; + } } flush_dcache_mmap_unlock(mapping); } @@ -339,17 +337,15 @@ int parisc_cache_flush_threshold = FLUSH_THRESHOLD; void parisc_setup_cache_timing(void) { unsigned long rangetime, alltime; - extern char _text; /* start of kernel code, defined by linker */ - extern char _end; /* end of BSS, defined by linker */ unsigned long size; alltime = mfctl(16); flush_data_cache(); alltime = mfctl(16) - alltime; - size = (unsigned long)(&_end - _text); + size = (unsigned long)(_end - _text); rangetime = mfctl(16); - flush_kernel_dcache_range((unsigned long)&_text, size); + flush_kernel_dcache_range((unsigned long)_text, size); rangetime = mfctl(16) - rangetime; printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n", diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c index d34bbe7ae0e3..d016d672ec2b 100644 --- a/arch/parisc/kernel/drivers.c +++ b/arch/parisc/kernel/drivers.c @@ -46,36 +46,51 @@ static struct device root = { .bus_id = "parisc", }; -#define for_each_padev(padev) \ - for (padev = next_dev(&root); padev != NULL; \ - padev = next_dev(&padev->dev)) +static inline int check_dev(struct device *dev) +{ + if (dev->bus == &parisc_bus_type) { + struct parisc_device *pdev; + pdev = to_parisc_device(dev); + return pdev->id.hw_type != HPHW_FAULTY; + } + return 1; +} + +static struct device * +parse_tree_node(struct device *parent, int index, struct hardware_path *modpath); -#define check_dev(padev) \ - (padev->id.hw_type != HPHW_FAULTY) ? padev : next_dev(&padev->dev) +struct recurse_struct { + void * obj; + int (*fn)(struct device *, void *); +}; + +static int descend_children(struct device * dev, void * data) +{ + struct recurse_struct * recurse_data = (struct recurse_struct *)data; + + if (recurse_data->fn(dev, recurse_data->obj)) + return 1; + else + return device_for_each_child(dev, recurse_data, descend_children); +} /** - * next_dev - enumerates registered devices - * @dev: the previous device returned from next_dev + * for_each_padev - Iterate over all devices in the tree + * @fn: Function to call for each device. + * @data: Data to pass to the called function. * - * next_dev does a depth-first search of the tree, returning parents - * before children. Returns NULL when there are no more devices. + * This performs a depth-first traversal of the tree, calling the + * function passed for each node. It calls the function for parents + * before children. 
*/ -static struct parisc_device *next_dev(struct device *dev) -{ - if (!list_empty(&dev->children)) { - dev = list_to_dev(dev->children.next); - return check_dev(to_parisc_device(dev)); - } - while (dev != &root) { - if (dev->node.next != &dev->parent->children) { - dev = list_to_dev(dev->node.next); - return to_parisc_device(dev); - } - dev = dev->parent; - } - - return NULL; +static int for_each_padev(int (*fn)(struct device *, void *), void * data) +{ + struct recurse_struct recurse_data = { + .obj = data, + .fn = fn, + }; + return device_for_each_child(&root, &recurse_data, descend_children); } /** @@ -105,12 +120,6 @@ static int match_device(struct parisc_driver *driver, struct parisc_device *dev) return 0; } -static void claim_device(struct parisc_driver *driver, struct parisc_device *dev) -{ - dev->driver = driver; - request_mem_region(dev->hpa, 0x1000, driver->name); -} - static int parisc_driver_probe(struct device *dev) { int rc; @@ -119,8 +128,8 @@ static int parisc_driver_probe(struct device *dev) rc = pa_drv->probe(pa_dev); - if(!rc) - claim_device(pa_drv, pa_dev); + if (!rc) + pa_dev->driver = pa_drv; return rc; } @@ -131,7 +140,6 @@ static int parisc_driver_remove(struct device *dev) struct parisc_driver *pa_drv = to_parisc_driver(dev->driver); if (pa_drv->remove) pa_drv->remove(pa_dev); - release_mem_region(pa_dev->hpa, 0x1000); return 0; } @@ -173,6 +181,24 @@ int register_parisc_driver(struct parisc_driver *driver) } EXPORT_SYMBOL(register_parisc_driver); + +struct match_count { + struct parisc_driver * driver; + int count; +}; + +static int match_and_count(struct device * dev, void * data) +{ + struct match_count * m = data; + struct parisc_device * pdev = to_parisc_device(dev); + + if (check_dev(dev)) { + if (match_device(m->driver, pdev)) + m->count++; + } + return 0; +} + /** * count_parisc_driver - count # of devices this driver would match * @driver: the PA-RISC driver to try @@ -182,15 +208,14 @@ EXPORT_SYMBOL(register_parisc_driver); */ int count_parisc_driver(struct parisc_driver *driver) { - struct parisc_device *device; - int cnt = 0; + struct match_count m = { + .driver = driver, + .count = 0, + }; - for_each_padev(device) { - if (match_device(driver, device)) - cnt++; - } + for_each_padev(match_and_count, &m); - return cnt; + return m.count; } @@ -206,14 +231,34 @@ int unregister_parisc_driver(struct parisc_driver *driver) } EXPORT_SYMBOL(unregister_parisc_driver); -static struct parisc_device *find_device_by_addr(unsigned long hpa) +struct find_data { + unsigned long hpa; + struct parisc_device * dev; +}; + +static int find_device(struct device * dev, void * data) { - struct parisc_device *dev; - for_each_padev(dev) { - if (dev->hpa == hpa) - return dev; + struct parisc_device * pdev = to_parisc_device(dev); + struct find_data * d = (struct find_data*)data; + + if (check_dev(dev)) { + if (pdev->hpa.start == d->hpa) { + d->dev = pdev; + return 1; + } } - return NULL; + return 0; +} + +static struct parisc_device *find_device_by_addr(unsigned long hpa) +{ + struct find_data d = { + .hpa = hpa, + }; + int ret; + + ret = for_each_padev(find_device, &d); + return ret ? 
d.dev : NULL; } /** @@ -387,6 +432,23 @@ struct parisc_device * create_tree_node(char id, struct device *parent) return dev; } +struct match_id_data { + char id; + struct parisc_device * dev; +}; + +static int match_by_id(struct device * dev, void * data) +{ + struct parisc_device * pdev = to_parisc_device(dev); + struct match_id_data * d = data; + + if (pdev->hw_path == d->id) { + d->dev = pdev; + return 1; + } + return 0; +} + /** * alloc_tree_node - returns a device entry in the iotree * @parent: the parent node in the tree @@ -397,15 +459,13 @@ struct parisc_device * create_tree_node(char id, struct device *parent) */ static struct parisc_device * alloc_tree_node(struct device *parent, char id) { - struct device *dev; - - list_for_each_entry(dev, &parent->children, node) { - struct parisc_device *padev = to_parisc_device(dev); - if (padev->hw_path == id) - return padev; - } - - return create_tree_node(id, parent); + struct match_id_data d = { + .id = id, + }; + if (device_for_each_child(parent, &d, match_by_id)) + return d.dev; + else + return create_tree_node(id, parent); } static struct parisc_device *create_parisc_device(struct hardware_path *modpath) @@ -439,10 +499,12 @@ alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path) dev = create_parisc_device(mod_path); if (dev->id.hw_type != HPHW_FAULTY) { - char p[64]; - print_pa_hwpath(dev, p); - printk("Two devices have hardware path %s. Please file a bug with HP.\n" - "In the meantime, you could try rearranging your cards.\n", p); + printk(KERN_ERR "Two devices have hardware path [%s]. " + "IODC data for second device: " + "%02x%02x%02x%02x%02x%02x\n" + "Rearranging GSC cards sometimes helps\n", + parisc_pathname(dev), iodc_data[0], iodc_data[1], + iodc_data[3], iodc_data[4], iodc_data[5], iodc_data[6]); return NULL; } @@ -451,12 +513,27 @@ alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path) dev->id.hversion_rev = iodc_data[1] & 0x0f; dev->id.sversion = ((iodc_data[4] & 0x0f) << 16) | (iodc_data[5] << 8) | iodc_data[6]; - dev->hpa = hpa; + dev->hpa.name = parisc_pathname(dev); + dev->hpa.start = hpa; + if (hpa == 0xf4000000 || hpa == 0xf6000000 || + hpa == 0xf8000000 || hpa == 0xfa000000) { + dev->hpa.end = hpa + 0x01ffffff; + } else { + dev->hpa.end = hpa + 0xfff; + } + dev->hpa.flags = IORESOURCE_MEM; name = parisc_hardware_description(&dev->id); if (name) { strlcpy(dev->name, name, sizeof(dev->name)); } + /* Silently fail things like mouse ports which are subsumed within + * the keyboard controller + */ + if ((hpa & 0xfff) == 0 && insert_resource(&iomem_resource, &dev->hpa)) + printk("Unable to claim HPA %lx for device %s\n", + hpa, name); + return dev; } @@ -555,6 +632,33 @@ static int match_parisc_device(struct device *dev, int index, return (curr->hw_path == id); } +struct parse_tree_data { + int index; + struct hardware_path * modpath; + struct device * dev; +}; + +static int check_parent(struct device * dev, void * data) +{ + struct parse_tree_data * d = data; + + if (check_dev(dev)) { + if (dev->bus == &parisc_bus_type) { + if (match_parisc_device(dev, d->index, d->modpath)) + d->dev = dev; + } else if (is_pci_dev(dev)) { + if (match_pci_device(dev, d->index, d->modpath)) + d->dev = dev; + } else if (dev->bus == NULL) { + /* we are on a bus bridge */ + struct device *new = parse_tree_node(dev, d->index, d->modpath); + if (new) + d->dev = new; + } + } + return d->dev != NULL; +} + /** * parse_tree_node - returns a device entry in the iotree * @parent: the parent node in the tree @@ -568,24 +672,18 @@ 
static int match_parisc_device(struct device *dev, int index, static struct device * parse_tree_node(struct device *parent, int index, struct hardware_path *modpath) { - struct device *device; - - list_for_each_entry(device, &parent->children, node) { - if (device->bus == &parisc_bus_type) { - if (match_parisc_device(device, index, modpath)) - return device; - } else if (is_pci_dev(device)) { - if (match_pci_device(device, index, modpath)) - return device; - } else if (device->bus == NULL) { - /* we are on a bus bridge */ - struct device *new = parse_tree_node(device, index, modpath); - if (new) - return new; - } - } + struct parse_tree_data d = { + .index = index, + .modpath = modpath, + }; - return NULL; + struct recurse_struct recurse_data = { + .obj = &d, + .fn = check_parent, + }; + + device_for_each_child(parent, &recurse_data, descend_children); + return d.dev; } /** @@ -636,7 +734,7 @@ EXPORT_SYMBOL(device_to_hwpath); ((dev->id.hw_type == HPHW_IOA) || (dev->id.hw_type == HPHW_BCPORT)) #define IS_LOWER_PORT(dev) \ - ((gsc_readl(dev->hpa + offsetof(struct bc_module, io_status)) \ + ((gsc_readl(dev->hpa.start + offsetof(struct bc_module, io_status)) \ & BC_PORT_MASK) == BC_LOWER_PORT) #define MAX_NATIVE_DEVICES 64 @@ -645,8 +743,8 @@ EXPORT_SYMBOL(device_to_hwpath); #define FLEX_MASK F_EXTEND(0xfffc0000) #define IO_IO_LOW offsetof(struct bc_module, io_io_low) #define IO_IO_HIGH offsetof(struct bc_module, io_io_high) -#define READ_IO_IO_LOW(dev) (unsigned long)(signed int)gsc_readl(dev->hpa + IO_IO_LOW) -#define READ_IO_IO_HIGH(dev) (unsigned long)(signed int)gsc_readl(dev->hpa + IO_IO_HIGH) +#define READ_IO_IO_LOW(dev) (unsigned long)(signed int)gsc_readl(dev->hpa.start + IO_IO_LOW) +#define READ_IO_IO_HIGH(dev) (unsigned long)(signed int)gsc_readl(dev->hpa.start + IO_IO_HIGH) static void walk_native_bus(unsigned long io_io_low, unsigned long io_io_high, struct device *parent); @@ -655,10 +753,10 @@ void walk_lower_bus(struct parisc_device *dev) { unsigned long io_io_low, io_io_high; - if(!BUS_CONVERTER(dev) || IS_LOWER_PORT(dev)) + if (!BUS_CONVERTER(dev) || IS_LOWER_PORT(dev)) return; - if(dev->id.hw_type == HPHW_IOA) { + if (dev->id.hw_type == HPHW_IOA) { io_io_low = (unsigned long)(signed int)(READ_IO_IO_LOW(dev) << 16); io_io_high = io_io_low + MAX_NATIVE_DEVICES * NATIVE_DEVICE_OFFSET; } else { @@ -731,7 +829,7 @@ static void print_parisc_device(struct parisc_device *dev) print_pa_hwpath(dev, hw_path); printk(KERN_INFO "%d. 
%s at 0x%lx [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }", - ++count, dev->name, dev->hpa, hw_path, dev->id.hw_type, + ++count, dev->name, dev->hpa.start, hw_path, dev->id.hw_type, dev->id.hversion_rev, dev->id.hversion, dev->id.sversion); if (dev->num_addrs) { @@ -753,13 +851,20 @@ void init_parisc_bus(void) get_device(&root); } + +static int print_one_device(struct device * dev, void * data) +{ + struct parisc_device * pdev = to_parisc_device(dev); + + if (check_dev(dev)) + print_parisc_device(pdev); + return 0; +} + /** * print_parisc_devices - Print out a list of devices found in this system */ void print_parisc_devices(void) { - struct parisc_device *dev; - for_each_padev(dev) { - print_parisc_device(dev); - } + for_each_padev(print_one_device, NULL); } diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index be0f07f2fa58..9af4b22a6d77 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S @@ -30,14 +30,14 @@ * - save registers to kernel stack and handle in assembly or C */ +#include <asm/psw.h> #include <asm/assembly.h> /* for LDREG/STREG defines */ #include <asm/pgtable.h> -#include <asm/psw.h> #include <asm/signal.h> #include <asm/unistd.h> #include <asm/thread_info.h> -#ifdef __LP64__ +#ifdef CONFIG_64BIT #define CMPIB cmpib,* #define CMPB cmpb,* #define COND(x) *x @@ -67,19 +67,22 @@ /* Switch to virtual mapping, trashing only %r1 */ .macro virt_map - rsm PSW_SM_Q,%r0 - tovirt_r1 %r29 - mfsp %sr7, %r1 - or,= %r0,%r1,%r0 /* Only save sr7 in sr3 if sr7 != 0 */ - mtsp %r1, %sr3 + /* pcxt_ssm_bug */ + rsm PSW_SM_I, %r0 /* barrier for "Relied upon Translation */ mtsp %r0, %sr4 mtsp %r0, %sr5 + mfsp %sr7, %r1 + or,= %r0,%r1,%r0 /* Only save sr7 in sr3 if sr7 != 0 */ + mtsp %r1, %sr3 + tovirt_r1 %r29 + load32 KERNEL_PSW, %r1 + + rsm PSW_SM_QUIET,%r0 /* second "heavy weight" ctl op */ mtsp %r0, %sr6 mtsp %r0, %sr7 - load32 KERNEL_PSW, %r1 - mtctl %r1, %cr22 mtctl %r0, %cr17 /* Clear IIASQ tail */ mtctl %r0, %cr17 /* Clear IIASQ head */ + mtctl %r1, %ipsw load32 4f, %r1 mtctl %r1, %cr18 /* Set IIAOQ tail */ ldo 4(%r1), %r1 @@ -214,7 +217,7 @@ va = r8 /* virtual address for which the trap occured */ spc = r24 /* space for which the trap occured */ -#ifndef __LP64__ +#ifndef CONFIG_64BIT /* * itlb miss interruption handler (parisc 1.1 - 32 bit) @@ -236,7 +239,7 @@ .macro itlb_20 code mfctl %pcsq, spc -#ifdef __LP64__ +#ifdef CONFIG_64BIT b itlb_miss_20w #else b itlb_miss_20 @@ -246,7 +249,7 @@ .align 32 .endm -#ifndef __LP64__ +#ifndef CONFIG_64BIT /* * naitlb miss interruption handler (parisc 1.1 - 32 bit) * @@ -283,7 +286,7 @@ .macro naitlb_20 code mfctl %isr,spc -#ifdef __LP64__ +#ifdef CONFIG_64BIT b itlb_miss_20w #else b itlb_miss_20 @@ -296,7 +299,7 @@ .align 32 .endm -#ifndef __LP64__ +#ifndef CONFIG_64BIT /* * dtlb miss interruption handler (parisc 1.1 - 32 bit) */ @@ -318,7 +321,7 @@ .macro dtlb_20 code mfctl %isr, spc -#ifdef __LP64__ +#ifdef CONFIG_64BIT b dtlb_miss_20w #else b dtlb_miss_20 @@ -328,7 +331,7 @@ .align 32 .endm -#ifndef __LP64__ +#ifndef CONFIG_64BIT /* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */ .macro nadtlb_11 code @@ -346,7 +349,7 @@ .macro nadtlb_20 code mfctl %isr,spc -#ifdef __LP64__ +#ifdef CONFIG_64BIT b nadtlb_miss_20w #else b nadtlb_miss_20 @@ -356,7 +359,7 @@ .align 32 .endm -#ifndef __LP64__ +#ifndef CONFIG_64BIT /* * dirty bit trap interruption handler (parisc 1.1 - 32 bit) */ @@ -378,7 +381,7 @@ .macro dbit_20 code mfctl %isr,spc -#ifdef __LP64__ +#ifdef CONFIG_64BIT b dbit_trap_20w #else b dbit_trap_20 @@ 
-391,7 +394,7 @@ /* The following are simple 32 vs 64 bit instruction * abstractions for the macros */ .macro EXTR reg1,start,length,reg2 -#ifdef __LP64__ +#ifdef CONFIG_64BIT extrd,u \reg1,32+\start,\length,\reg2 #else extrw,u \reg1,\start,\length,\reg2 @@ -399,7 +402,7 @@ .endm .macro DEP reg1,start,length,reg2 -#ifdef __LP64__ +#ifdef CONFIG_64BIT depd \reg1,32+\start,\length,\reg2 #else depw \reg1,\start,\length,\reg2 @@ -407,7 +410,7 @@ .endm .macro DEPI val,start,length,reg -#ifdef __LP64__ +#ifdef CONFIG_64BIT depdi \val,32+\start,\length,\reg #else depwi \val,\start,\length,\reg @@ -418,7 +421,7 @@ * fault. We have to extract this and place it in the va, * zeroing the corresponding bits in the space register */ .macro space_adjust spc,va,tmp -#ifdef __LP64__ +#ifdef CONFIG_64BIT extrd,u \spc,63,SPACEID_SHIFT,\tmp depd %r0,63,SPACEID_SHIFT,\spc depd \tmp,31,SPACEID_SHIFT,\va @@ -476,7 +479,7 @@ bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault DEP %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */ copy \pmd,%r9 -#ifdef __LP64__ +#ifdef CONFIG_64BIT shld %r9,PxD_VALUE_SHIFT,\pmd #else shlw %r9,PxD_VALUE_SHIFT,\pmd @@ -607,7 +610,7 @@ .macro do_alias spc,tmp,tmp1,va,pte,prot,fault cmpib,COND(<>),n 0,\spc,\fault ldil L%(TMPALIAS_MAP_START),\tmp -#if defined(__LP64__) && (TMPALIAS_MAP_START >= 0x80000000) +#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000) /* on LP64, ldi will sign extend into the upper 32 bits, * which is behaviour we don't want */ depdi 0,31,32,\tmp @@ -621,7 +624,7 @@ * OK, it is in the temp alias region, check whether "from" or "to". * Check "subtle" note in pacache.S re: r23/r26. */ -#ifdef __LP64__ +#ifdef CONFIG_64BIT extrd,u,*= \va,41,1,%r0 #else extrw,u,= \va,9,1,%r0 @@ -688,7 +691,7 @@ fault_vector_20: def 30 def 31 -#ifndef __LP64__ +#ifndef CONFIG_64BIT .export fault_vector_11 @@ -761,7 +764,7 @@ __kernel_thread: copy %r30, %r1 ldo PT_SZ_ALGN(%r30),%r30 -#ifdef __LP64__ +#ifdef CONFIG_64BIT /* Yo, function pointers in wide mode are little structs... -PB */ ldd 24(%r26), %r2 STREG %r2, PT_GR27(%r1) /* Store childs %dp */ @@ -777,7 +780,7 @@ __kernel_thread: or %r26, %r24, %r26 /* will have kernel mappings. 
*/ ldi 1, %r25 /* stack_start, signals kernel thread */ stw %r0, -52(%r30) /* user_tid */ -#ifdef __LP64__ +#ifdef CONFIG_64BIT ldo -16(%r30),%r29 /* Reference param save area */ #endif BL do_fork, %r2 @@ -806,7 +809,7 @@ ret_from_kernel_thread: LDREG TI_TASK-THREAD_SZ_ALGN(%r30), %r1 LDREG TASK_PT_GR25(%r1), %r26 -#ifdef __LP64__ +#ifdef CONFIG_64BIT LDREG TASK_PT_GR27(%r1), %r27 LDREG TASK_PT_GR22(%r1), %r22 #endif @@ -814,11 +817,16 @@ ret_from_kernel_thread: ble 0(%sr7, %r1) copy %r31, %r2 -#ifdef __LP64__ +#ifdef CONFIG_64BIT ldo -16(%r30),%r29 /* Reference param save area */ loadgp /* Thread could have been in a module */ #endif +#ifndef CONFIG_64BIT b sys_exit +#else + load32 sys_exit, %r1 + bv %r0(%r1) +#endif ldi 0, %r26 .import sys_execve, code @@ -830,7 +838,7 @@ __execve: STREG %r26, PT_GR26(%r16) STREG %r25, PT_GR25(%r16) STREG %r24, PT_GR24(%r16) -#ifdef __LP64__ +#ifdef CONFIG_64BIT ldo -16(%r30),%r29 /* Reference param save area */ #endif BL sys_execve, %r2 @@ -855,6 +863,7 @@ __execve: _switch_to: STREG %r2, -RP_OFFSET(%r30) + callee_save_float callee_save load32 _switch_to_ret, %r2 @@ -871,6 +880,7 @@ _switch_to: _switch_to_ret: mtctl %r0, %cr0 /* Needed for single stepping */ callee_rest + callee_rest_float LDREG -RP_OFFSET(%r30), %r2 bv %r0(%r2) @@ -888,9 +898,6 @@ _switch_to_ret: * this way, then we will need to copy %sr3 in to PT_SR[3..7], and * adjust IASQ[0..1]. * - * Note that the following code uses a "relied upon translation". - * See the parisc ACD for details. The ssm is necessary due to a - * PCXT bug. */ .align 4096 @@ -911,7 +918,7 @@ syscall_exit_rfi: STREG %r19,PT_IAOQ1(%r16) LDREG PT_PSW(%r16),%r19 load32 USER_PSW_MASK,%r1 -#ifdef __LP64__ +#ifdef CONFIG_64BIT load32 USER_PSW_HI_MASK,%r20 depd %r20,31,32,%r1 #endif @@ -955,7 +962,7 @@ intr_return: /* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) amount ** irq_stat[] is defined using ____cacheline_aligned. */ -#ifdef __LP64__ +#ifdef CONFIG_64BIT shld %r1, 6, %r20 #else shlw %r1, 5, %r20 @@ -963,9 +970,6 @@ intr_return: add %r19,%r20,%r19 /* now have &irq_stat[smp_processor_id()] */ #endif /* CONFIG_SMP */ - LDREG IRQSTAT_SIRQ_PEND(%r19),%r20 /* hardirq.h: unsigned long */ - cmpib,<>,n 0,%r20,intr_do_softirq /* forward */ - intr_check_resched: /* check for reschedule */ @@ -985,24 +989,19 @@ intr_restore: rest_fp %r1 rest_general %r29 - /* Create a "relied upon translation" PA 2.0 Arch. F-5 */ - ssm 0,%r0 - nop - nop - nop - nop - nop - nop - nop + /* inverse of virt_map */ + pcxt_ssm_bug + rsm PSW_SM_QUIET,%r0 /* prepare for rfi */ tophys_r1 %r29 - rsm (PSW_SM_Q|PSW_SM_P|PSW_SM_D|PSW_SM_I),%r0 /* Restore space id's and special cr's from PT_REGS - * structure pointed to by r29 */ + * structure pointed to by r29 + */ rest_specials %r29 - /* Important: Note that rest_stack restores r29 - * last (we are using it)! It also restores r1 and r30. */ + /* IMPORTANT: rest_stack restores r29 last (we are using it)! + * It also restores r1 and r30. 
+ */ rest_stack rfi @@ -1015,17 +1014,6 @@ intr_restore: nop nop - .import do_softirq,code -intr_do_softirq: - bl do_softirq,%r2 -#ifdef __LP64__ - ldo -16(%r30),%r29 /* Reference param save area */ -#else - nop -#endif - b intr_check_resched - nop - .import schedule,code intr_do_resched: /* Only do reschedule if we are returning to user space */ @@ -1036,12 +1024,17 @@ intr_do_resched: CMPIB= 0,%r20,intr_restore /* backward */ nop -#ifdef __LP64__ +#ifdef CONFIG_64BIT ldo -16(%r30),%r29 /* Reference param save area */ #endif ldil L%intr_check_sig, %r2 +#ifndef CONFIG_64BIT b schedule +#else + load32 schedule, %r20 + bv %r0(%r20) +#endif ldo R%intr_check_sig(%r2), %r2 @@ -1064,7 +1057,7 @@ intr_do_signal: copy %r0, %r24 /* unsigned long in_syscall */ copy %r16, %r25 /* struct pt_regs *regs */ -#ifdef __LP64__ +#ifdef CONFIG_64BIT ldo -16(%r30),%r29 /* Reference param save area */ #endif @@ -1088,7 +1081,7 @@ intr_extint: mfctl %cr31,%r1 copy %r30,%r17 /* FIXME! depi below has hardcoded idea of interrupt stack size (32k)*/ -#ifdef __LP64__ +#ifdef CONFIG_64BIT depdi 0,63,15,%r17 #else depi 0,31,15,%r17 @@ -1115,7 +1108,7 @@ intr_extint: ldil L%intr_return, %r2 -#ifdef __LP64__ +#ifdef CONFIG_64BIT ldo -16(%r30),%r29 /* Reference param save area */ #endif @@ -1153,15 +1146,17 @@ intr_save: CMPIB=,n 6,%r26,skip_save_ior - /* save_specials left ipsw value in r8 for us to test */ mfctl %cr20, %r16 /* isr */ + nop /* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */ mfctl %cr21, %r17 /* ior */ -#ifdef __LP64__ + +#ifdef CONFIG_64BIT /* * If the interrupted code was running with W bit off (32 bit), * clear the b bits (bits 0 & 1) in the ior. + * save_specials left ipsw value in r8 for us to test. */ extrd,u,*<> %r8,PSW_W_BIT,1,%r0 depdi 0,1,2,%r17 @@ -1192,7 +1187,7 @@ skip_save_ior: loadgp copy %r29, %r25 /* arg1 is pt_regs */ -#ifdef __LP64__ +#ifdef CONFIG_64BIT ldo -16(%r30),%r29 /* Reference param save area */ #endif @@ -1230,7 +1225,7 @@ skip_save_ior: spc = r24 /* space for which the trap occured */ ptp = r25 /* page directory/page table pointer */ -#ifdef __LP64__ +#ifdef CONFIG_64BIT dtlb_miss_20w: space_adjust spc,va,t0 @@ -1487,10 +1482,10 @@ nadtlb_emulate: add,l %r1,%r24,%r1 /* doesn't affect c/b bits */ nadtlb_nullify: - mfctl %cr22,%r8 /* Get ipsw */ + mfctl %ipsw,%r8 ldil L%PSW_N,%r9 or %r8,%r9,%r8 /* Set PSW_N */ - mtctl %r8,%cr22 + mtctl %r8,%ipsw rfir nop @@ -1521,7 +1516,7 @@ nadtlb_probe_check: nop -#ifdef __LP64__ +#ifdef CONFIG_64BIT itlb_miss_20w: /* @@ -1588,7 +1583,7 @@ itlb_miss_20: #endif -#ifdef __LP64__ +#ifdef CONFIG_64BIT dbit_trap_20w: space_adjust spc,va,t0 @@ -1797,7 +1792,7 @@ sys_fork_wrapper: STREG %r2,-RP_OFFSET(%r30) ldo FRAME_SIZE(%r30),%r30 -#ifdef __LP64__ +#ifdef CONFIG_64BIT ldo -16(%r30),%r29 /* Reference param save area */ #endif @@ -1847,10 +1842,11 @@ sys_clone_wrapper: STREG %r2,-RP_OFFSET(%r30) ldo FRAME_SIZE(%r30),%r30 -#ifdef __LP64__ +#ifdef CONFIG_64BIT ldo -16(%r30),%r29 /* Reference param save area */ #endif + /* WARNING - Clobbers r19 and r21, userspace must save these! 
*/ STREG %r2,PT_GR19(%r1) /* save for child */ STREG %r30,PT_GR21(%r1) BL sys_clone,%r2 @@ -1869,7 +1865,7 @@ sys_vfork_wrapper: STREG %r2,-RP_OFFSET(%r30) ldo FRAME_SIZE(%r30),%r30 -#ifdef __LP64__ +#ifdef CONFIG_64BIT ldo -16(%r30),%r29 /* Reference param save area */ #endif @@ -1897,10 +1893,10 @@ sys_vfork_wrapper: STREG %r2,-RP_OFFSET(%r30) ldo FRAME_SIZE(%r30),%r30 -#ifdef __LP64__ +#ifdef CONFIG_64BIT ldo -16(%r30),%r29 /* Reference param save area */ #endif - bl \execve,%r2 + BL \execve,%r2 copy %r1,%arg0 ldo -FRAME_SIZE(%r30),%r30 @@ -1923,7 +1919,7 @@ error_\execve: sys_execve_wrapper: execve_wrapper sys_execve -#ifdef __LP64__ +#ifdef CONFIG_64BIT .export sys32_execve_wrapper .import sys32_execve @@ -1937,7 +1933,7 @@ sys_rt_sigreturn_wrapper: ldo TASK_REGS(%r26),%r26 /* get pt regs */ /* Don't save regs, we are going to restore them from sigcontext. */ STREG %r2, -RP_OFFSET(%r30) -#ifdef __LP64__ +#ifdef CONFIG_64BIT ldo FRAME_SIZE(%r30), %r30 BL sys_rt_sigreturn,%r2 ldo -16(%r30),%r29 /* Reference param save area */ @@ -1968,7 +1964,7 @@ sys_sigaltstack_wrapper: ldo TASK_REGS(%r1),%r24 /* get pt regs */ LDREG TASK_PT_GR30(%r24),%r24 STREG %r2, -RP_OFFSET(%r30) -#ifdef __LP64__ +#ifdef CONFIG_64BIT ldo FRAME_SIZE(%r30), %r30 b,l do_sigaltstack,%r2 ldo -16(%r30),%r29 /* Reference param save area */ @@ -1982,7 +1978,7 @@ sys_sigaltstack_wrapper: bv %r0(%r2) nop -#ifdef __LP64__ +#ifdef CONFIG_64BIT .export sys32_sigaltstack_wrapper sys32_sigaltstack_wrapper: /* Get the user stack pointer */ @@ -2006,7 +2002,7 @@ sys_rt_sigsuspend_wrapper: reg_save %r24 STREG %r2, -RP_OFFSET(%r30) -#ifdef __LP64__ +#ifdef CONFIG_64BIT ldo FRAME_SIZE(%r30), %r30 b,l sys_rt_sigsuspend,%r2 ldo -16(%r30),%r29 /* Reference param save area */ @@ -2079,7 +2075,7 @@ syscall_check_bh: ldw TI_CPU-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26 /* cpu # */ /* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) bits */ -#ifdef __LP64__ +#ifdef CONFIG_64BIT shld %r26, 6, %r20 #else shlw %r26, 5, %r20 @@ -2087,9 +2083,6 @@ syscall_check_bh: add %r19,%r20,%r19 /* now have &irq_stat[smp_processor_id()] */ #endif /* CONFIG_SMP */ - LDREG IRQSTAT_SIRQ_PEND(%r19),%r20 /* hardirq.h: unsigned long */ - cmpib,<>,n 0,%r20,syscall_do_softirq /* forward */ - syscall_check_resched: /* check for reschedule */ @@ -2144,7 +2137,7 @@ syscall_restore: depi 3,31,2,%r31 /* ensure return to user mode. */ -#ifdef __LP64__ +#ifdef CONFIG_64BIT /* decide whether to reset the wide mode bit * * For a syscall, the W bit is stored in the lowest bit @@ -2227,20 +2220,10 @@ pt_regs_ok: b intr_restore nop - .import do_softirq,code -syscall_do_softirq: - bl do_softirq,%r2 - nop - /* NOTE: We enable I-bit incase we schedule later, - * and we might be going back to userspace if we were - * traced. 
*/ - b syscall_check_resched - ssm PSW_SM_I, %r0 /* do_softirq returns with I bit off */ - .import schedule,code syscall_do_resched: BL schedule,%r2 -#ifdef __LP64__ +#ifdef CONFIG_64BIT ldo -16(%r30),%r29 /* Reference param save area */ #else nop @@ -2260,7 +2243,7 @@ syscall_do_signal: ldi 1, %r24 /* unsigned long in_syscall */ -#ifdef __LP64__ +#ifdef CONFIG_64BIT ldo -16(%r30),%r29 /* Reference param save area */ #endif BL do_signal,%r2 diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c index f244fb200db1..553f8fe03224 100644 --- a/arch/parisc/kernel/firmware.c +++ b/arch/parisc/kernel/firmware.c @@ -83,15 +83,15 @@ static unsigned long pdc_result2[32] __attribute__ ((aligned (8))); int parisc_narrow_firmware = 1; #endif -/* on all currently-supported platforms, IODC I/O calls are always - * 32-bit calls, and MEM_PDC calls are always the same width as the OS. - * This means Cxxx boxes can't run wide kernels right now. -PB +/* On most currently-supported platforms, IODC I/O calls are 32-bit calls + * and MEM_PDC calls are always the same width as the OS. + * Some PAT boxes may have 64-bit IODC I/O. * - * CONFIG_PDC_NARROW has been added to allow 64-bit kernels to run on - * systems with 32-bit MEM_PDC calls. This will allow wide kernels to - * run on Cxxx boxes now. -RB - * - * Note that some PAT boxes may have 64-bit IODC I/O... + * Ryan Bradetich added the now obsolete CONFIG_PDC_NARROW to allow + * 64-bit kernels to run on systems with 32-bit MEM_PDC calls. + * This allowed wide kernels to run on Cxxx boxes. + * We now detect 32-bit-only PDC and dynamically switch to 32-bit mode + * when running a 64-bit kernel on such boxes (e.g. C200 or C360). */ #ifdef __LP64__ diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S index 28405edf8448..0b47afc20690 100644 --- a/arch/parisc/kernel/head.S +++ b/arch/parisc/kernel/head.S @@ -12,7 +12,7 @@ * Initial Version 04-23-1999 by Helge Deller <deller@gmx.de> */ -#include <linux/autoconf.h> /* for CONFIG_SMP */ +#include <linux/config.h> /* for CONFIG_SMP */ #include <asm/asm-offsets.h> #include <asm/psw.h> @@ -36,10 +36,10 @@ boot_args: .align 4 .import init_thread_union,data .import fault_vector_20,code /* IVA parisc 2.0 32 bit */ -#ifndef __LP64__ +#ifndef CONFIG_64BIT .import fault_vector_11,code /* IVA parisc 1.1 32 bit */ .import $global$ /* forward declaration */ -#endif /*!LP64*/ +#endif /*!CONFIG_64BIT*/ .export stext .export _stext,data /* Kernel want it this way! */ _stext: @@ -76,7 +76,7 @@ $bss_loop: mtctl %r4,%cr24 /* Initialize kernel root pointer */ mtctl %r4,%cr25 /* Initialize user root pointer */ -#ifdef __LP64__ +#ifdef CONFIG_64BIT /* Set pmd in pgd */ load32 PA(pmd0),%r5 shrd %r5,PxD_VALUE_SHIFT,%r3 @@ -99,7 +99,7 @@ $bss_loop: stw %r3,0(%r4) ldo (ASM_PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3 addib,> -1,%r1,1b -#ifdef __LP64__ +#ifdef CONFIG_64BIT ldo ASM_PMD_ENTRY_SIZE(%r4),%r4 #else ldo ASM_PGD_ENTRY_SIZE(%r4),%r4 @@ -170,7 +170,7 @@ common_stext: stw %r0,0x28(%r0) /* MEM_RENDEZ_HI */ #endif /*CONFIG_SMP*/ -#ifdef __LP64__ +#ifdef CONFIG_64BIT tophys_r1 %sp /* Save the rfi target address */ @@ -224,8 +224,6 @@ stext_pdc_ret: mtctl %r0,%cr12 mtctl %r0,%cr13 - /* Prepare to RFI! Man all the cannons! */ - /* Initialize the global data pointer */ loadgp @@ -235,7 +233,7 @@ stext_pdc_ret: * following short sequence of instructions can determine this * (without being illegal on a PA1.1 machine). 
*/ -#ifndef __LP64__ +#ifndef CONFIG_64BIT ldi 32,%r10 mtctl %r10,%cr11 .level 2.0 @@ -248,52 +246,22 @@ stext_pdc_ret: $is_pa20: .level LEVEL /* restore 1.1 || 2.0w */ -#endif /*!LP64*/ +#endif /*!CONFIG_64BIT*/ load32 PA(fault_vector_20),%r10 $install_iva: mtctl %r10,%cr14 -#ifdef __LP64__ - b aligned_rfi + b aligned_rfi /* Prepare to RFI! Man all the cannons! */ nop - .align 256 + .align 128 aligned_rfi: - ssm 0,0 - nop /* 1 */ - nop /* 2 */ - nop /* 3 */ - nop /* 4 */ - nop /* 5 */ - nop /* 6 */ - nop /* 7 */ - nop /* 8 */ -#endif + pcxt_ssm_bug -#ifdef __LP64__ /* move to psw.h? */ -#define PSW_BITS PSW_Q+PSW_I+PSW_D+PSW_P+PSW_R -#else -#define PSW_BITS PSW_SM_Q -#endif + rsm PSW_SM_QUIET,%r0 /* off troublesome PSW bits */ + /* Don't need NOPs, have 8 compliant insn before rfi */ -$rfi: - /* turn off troublesome PSW bits */ - rsm PSW_BITS,%r0 - - /* kernel PSW: - * - no interruptions except HPMC and TOC (which are handled by PDC) - * - Q bit set (IODC / PDC interruptions) - * - big-endian - * - virtually mapped - */ - load32 KERNEL_PSW,%r10 - mtctl %r10,%ipsw - - /* Set the space pointers for the post-RFI world - ** Clear the two-level IIA Space Queue, effectively setting - ** Kernel space. - */ mtctl %r0,%cr17 /* Clear IIASQ tail */ mtctl %r0,%cr17 /* Clear IIASQ head */ @@ -301,8 +269,11 @@ $rfi: mtctl %r11,%cr18 /* IIAOQ head */ ldo 4(%r11),%r11 mtctl %r11,%cr18 /* IIAOQ tail */ + + load32 KERNEL_PSW,%r10 + mtctl %r10,%ipsw - /* Jump to hyperspace */ + /* Jump through hyperspace to Virt Mode */ rfi nop @@ -313,7 +284,7 @@ $rfi: .import smp_init_current_idle_task,data .import smp_callin,code -#ifndef __LP64__ +#ifndef CONFIG_64BIT smp_callin_rtn: .proc .callinfo @@ -321,7 +292,7 @@ smp_callin_rtn: nop nop .procend -#endif /*!LP64*/ +#endif /*!CONFIG_64BIT*/ /*************************************************************************** * smp_slave_stext is executed by all non-monarch Processors when the Monarch @@ -356,7 +327,7 @@ smp_slave_stext: mtctl %r4,%cr24 /* Initialize kernel root pointer */ mtctl %r4,%cr25 /* Initialize user root pointer */ -#ifdef __LP64__ +#ifdef CONFIG_64BIT /* Setup PDCE_PROC entry */ copy %arg0,%r3 #else @@ -373,7 +344,7 @@ smp_slave_stext: .procend #endif /* CONFIG_SMP */ -#ifndef __LP64__ +#ifndef CONFIG_64BIT .data .align 4 @@ -383,4 +354,4 @@ smp_slave_stext: .size $global$,4 $global$: .word 0 -#endif /*!LP64*/ +#endif /*!CONFIG_64BIT*/ diff --git a/arch/parisc/kernel/inventory.c b/arch/parisc/kernel/inventory.c index 1a1c66422736..8f563871e83c 100644 --- a/arch/parisc/kernel/inventory.c +++ b/arch/parisc/kernel/inventory.c @@ -188,7 +188,7 @@ pat_query_module(ulong pcell_loc, ulong mod_index) temp = pa_pdc_cell.cba; dev = alloc_pa_dev(PAT_GET_CBA(temp), &pa_pdc_cell.mod_path); if (!dev) { - return PDC_NE_MOD; + return PDC_OK; } /* alloc_pa_dev sets dev->hpa */ diff --git a/arch/parisc/kernel/ioctl32.c b/arch/parisc/kernel/ioctl32.c index 1d3824b670d1..4eada1bb27f0 100644 --- a/arch/parisc/kernel/ioctl32.c +++ b/arch/parisc/kernel/ioctl32.c @@ -19,550 +19,6 @@ #define CODE #include "compat_ioctl.c" -/* Use this to get at 32-bit user passed pointers. - See sys_sparc32.c for description about these. */ -#define A(__x) ((unsigned long)(__x)) -/* The same for use with copy_from_user() and copy_to_user(). 
*/ -#define B(__x) ((void *)(unsigned long)(__x)) - -#if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE) -/* This really belongs in include/linux/drm.h -DaveM */ -#include "../../../drivers/char/drm/drm.h" - -typedef struct drm32_version { - int version_major; /* Major version */ - int version_minor; /* Minor version */ - int version_patchlevel;/* Patch level */ - int name_len; /* Length of name buffer */ - u32 name; /* Name of driver */ - int date_len; /* Length of date buffer */ - u32 date; /* User-space buffer to hold date */ - int desc_len; /* Length of desc buffer */ - u32 desc; /* User-space buffer to hold desc */ -} drm32_version_t; -#define DRM32_IOCTL_VERSION DRM_IOWR(0x00, drm32_version_t) - -static int drm32_version(unsigned int fd, unsigned int cmd, unsigned long arg) -{ - drm32_version_t *uversion = (drm32_version_t *)arg; - char *name_ptr, *date_ptr, *desc_ptr; - u32 tmp1, tmp2, tmp3; - drm_version_t kversion; - mm_segment_t old_fs; - int ret; - - memset(&kversion, 0, sizeof(kversion)); - if (get_user(kversion.name_len, &uversion->name_len) || - get_user(kversion.date_len, &uversion->date_len) || - get_user(kversion.desc_len, &uversion->desc_len) || - get_user(tmp1, &uversion->name) || - get_user(tmp2, &uversion->date) || - get_user(tmp3, &uversion->desc)) - return -EFAULT; - - name_ptr = (char *) A(tmp1); - date_ptr = (char *) A(tmp2); - desc_ptr = (char *) A(tmp3); - - ret = -ENOMEM; - if (kversion.name_len && name_ptr) { - kversion.name = kmalloc(kversion.name_len, GFP_KERNEL); - if (!kversion.name) - goto out; - } - if (kversion.date_len && date_ptr) { - kversion.date = kmalloc(kversion.date_len, GFP_KERNEL); - if (!kversion.date) - goto out; - } - if (kversion.desc_len && desc_ptr) { - kversion.desc = kmalloc(kversion.desc_len, GFP_KERNEL); - if (!kversion.desc) - goto out; - } - - old_fs = get_fs(); - set_fs(KERNEL_DS); - ret = sys_ioctl (fd, DRM_IOCTL_VERSION, (unsigned long)&kversion); - set_fs(old_fs); - - if (!ret) { - if ((kversion.name && - copy_to_user(name_ptr, kversion.name, kversion.name_len)) || - (kversion.date && - copy_to_user(date_ptr, kversion.date, kversion.date_len)) || - (kversion.desc && - copy_to_user(desc_ptr, kversion.desc, kversion.desc_len))) - ret = -EFAULT; - if (put_user(kversion.version_major, &uversion->version_major) || - put_user(kversion.version_minor, &uversion->version_minor) || - put_user(kversion.version_patchlevel, &uversion->version_patchlevel) || - put_user(kversion.name_len, &uversion->name_len) || - put_user(kversion.date_len, &uversion->date_len) || - put_user(kversion.desc_len, &uversion->desc_len)) - ret = -EFAULT; - } - -out: - if (kversion.name) - kfree(kversion.name); - if (kversion.date) - kfree(kversion.date); - if (kversion.desc) - kfree(kversion.desc); - return ret; -} - -typedef struct drm32_unique { - int unique_len; /* Length of unique */ - u32 unique; /* Unique name for driver instantiation */ -} drm32_unique_t; -#define DRM32_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm32_unique_t) -#define DRM32_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm32_unique_t) - -static int drm32_getsetunique(unsigned int fd, unsigned int cmd, unsigned long arg) -{ - drm32_unique_t *uarg = (drm32_unique_t *)arg; - drm_unique_t karg; - mm_segment_t old_fs; - char *uptr; - u32 tmp; - int ret; - - if (get_user(karg.unique_len, &uarg->unique_len)) - return -EFAULT; - karg.unique = NULL; - - if (get_user(tmp, &uarg->unique)) - return -EFAULT; - - uptr = (char *) A(tmp); - - if (uptr) { - karg.unique = kmalloc(karg.unique_len, GFP_KERNEL); - if (!karg.unique) 
- return -ENOMEM; - if (cmd == DRM32_IOCTL_SET_UNIQUE && - copy_from_user(karg.unique, uptr, karg.unique_len)) { - kfree(karg.unique); - return -EFAULT; - } - } - - old_fs = get_fs(); - set_fs(KERNEL_DS); - if (cmd == DRM32_IOCTL_GET_UNIQUE) - ret = sys_ioctl (fd, DRM_IOCTL_GET_UNIQUE, (unsigned long)&karg); - else - ret = sys_ioctl (fd, DRM_IOCTL_SET_UNIQUE, (unsigned long)&karg); - set_fs(old_fs); - - if (!ret) { - if (cmd == DRM32_IOCTL_GET_UNIQUE && - uptr != NULL && - copy_to_user(uptr, karg.unique, karg.unique_len)) - ret = -EFAULT; - if (put_user(karg.unique_len, &uarg->unique_len)) - ret = -EFAULT; - } - - if (karg.unique != NULL) - kfree(karg.unique); - - return ret; -} - -typedef struct drm32_map { - u32 offset; /* Requested physical address (0 for SAREA)*/ - u32 size; /* Requested physical size (bytes) */ - drm_map_type_t type; /* Type of memory to map */ - drm_map_flags_t flags; /* Flags */ - u32 handle; /* User-space: "Handle" to pass to mmap */ - /* Kernel-space: kernel-virtual address */ - int mtrr; /* MTRR slot used */ - /* Private data */ -} drm32_map_t; -#define DRM32_IOCTL_ADD_MAP DRM_IOWR(0x15, drm32_map_t) - -static int drm32_addmap(unsigned int fd, unsigned int cmd, unsigned long arg) -{ - drm32_map_t *uarg = (drm32_map_t *) arg; - drm_map_t karg; - mm_segment_t old_fs; - u32 tmp; - int ret; - - ret = get_user(karg.offset, &uarg->offset); - ret |= get_user(karg.size, &uarg->size); - ret |= get_user(karg.type, &uarg->type); - ret |= get_user(karg.flags, &uarg->flags); - ret |= get_user(tmp, &uarg->handle); - ret |= get_user(karg.mtrr, &uarg->mtrr); - if (ret) - return -EFAULT; - - karg.handle = (void *) A(tmp); - - old_fs = get_fs(); - set_fs(KERNEL_DS); - ret = sys_ioctl(fd, DRM_IOCTL_ADD_MAP, (unsigned long) &karg); - set_fs(old_fs); - - if (!ret) { - ret = put_user(karg.offset, &uarg->offset); - ret |= put_user(karg.size, &uarg->size); - ret |= put_user(karg.type, &uarg->type); - ret |= put_user(karg.flags, &uarg->flags); - tmp = (u32) (long)karg.handle; - ret |= put_user(tmp, &uarg->handle); - ret |= put_user(karg.mtrr, &uarg->mtrr); - if (ret) - ret = -EFAULT; - } - - return ret; -} - -typedef struct drm32_buf_info { - int count; /* Entries in list */ - u32 list; /* (drm_buf_desc_t *) */ -} drm32_buf_info_t; -#define DRM32_IOCTL_INFO_BUFS DRM_IOWR(0x18, drm32_buf_info_t) - -static int drm32_info_bufs(unsigned int fd, unsigned int cmd, unsigned long arg) -{ - drm32_buf_info_t *uarg = (drm32_buf_info_t *)arg; - drm_buf_desc_t *ulist; - drm_buf_info_t karg; - mm_segment_t old_fs; - int orig_count, ret; - u32 tmp; - - if (get_user(karg.count, &uarg->count) || - get_user(tmp, &uarg->list)) - return -EFAULT; - - ulist = (drm_buf_desc_t *) A(tmp); - - orig_count = karg.count; - - karg.list = kmalloc(karg.count * sizeof(drm_buf_desc_t), GFP_KERNEL); - if (!karg.list) - return -EFAULT; - - old_fs = get_fs(); - set_fs(KERNEL_DS); - ret = sys_ioctl(fd, DRM_IOCTL_INFO_BUFS, (unsigned long) &karg); - set_fs(old_fs); - - if (!ret) { - if (karg.count <= orig_count && - (copy_to_user(ulist, karg.list, - karg.count * sizeof(drm_buf_desc_t)))) - ret = -EFAULT; - if (put_user(karg.count, &uarg->count)) - ret = -EFAULT; - } - - kfree(karg.list); - - return ret; -} - -typedef struct drm32_buf_free { - int count; - u32 list; /* (int *) */ -} drm32_buf_free_t; -#define DRM32_IOCTL_FREE_BUFS DRM_IOW( 0x1a, drm32_buf_free_t) - -static int drm32_free_bufs(unsigned int fd, unsigned int cmd, unsigned long arg) -{ - drm32_buf_free_t *uarg = (drm32_buf_free_t *)arg; - drm_buf_free_t karg; - 
mm_segment_t old_fs; - int *ulist; - int ret; - u32 tmp; - - if (get_user(karg.count, &uarg->count) || - get_user(tmp, &uarg->list)) - return -EFAULT; - - ulist = (int *) A(tmp); - - karg.list = kmalloc(karg.count * sizeof(int), GFP_KERNEL); - if (!karg.list) - return -ENOMEM; - - ret = -EFAULT; - if (copy_from_user(karg.list, ulist, (karg.count * sizeof(int)))) - goto out; - - old_fs = get_fs(); - set_fs(KERNEL_DS); - ret = sys_ioctl(fd, DRM_IOCTL_FREE_BUFS, (unsigned long) &karg); - set_fs(old_fs); - -out: - kfree(karg.list); - - return ret; -} - -typedef struct drm32_buf_pub { - int idx; /* Index into master buflist */ - int total; /* Buffer size */ - int used; /* Amount of buffer in use (for DMA) */ - u32 address; /* Address of buffer (void *) */ -} drm32_buf_pub_t; - -typedef struct drm32_buf_map { - int count; /* Length of buflist */ - u32 virtual; /* Mmaped area in user-virtual (void *) */ - u32 list; /* Buffer information (drm_buf_pub_t *) */ -} drm32_buf_map_t; -#define DRM32_IOCTL_MAP_BUFS DRM_IOWR(0x19, drm32_buf_map_t) - -static int drm32_map_bufs(unsigned int fd, unsigned int cmd, unsigned long arg) -{ - drm32_buf_map_t *uarg = (drm32_buf_map_t *)arg; - drm32_buf_pub_t *ulist; - drm_buf_map_t karg; - mm_segment_t old_fs; - int orig_count, ret, i; - u32 tmp1, tmp2; - - if (get_user(karg.count, &uarg->count) || - get_user(tmp1, &uarg->virtual) || - get_user(tmp2, &uarg->list)) - return -EFAULT; - - karg.virtual = (void *) A(tmp1); - ulist = (drm32_buf_pub_t *) A(tmp2); - - orig_count = karg.count; - - karg.list = kmalloc(karg.count * sizeof(drm_buf_pub_t), GFP_KERNEL); - if (!karg.list) - return -ENOMEM; - - ret = -EFAULT; - for (i = 0; i < karg.count; i++) { - if (get_user(karg.list[i].idx, &ulist[i].idx) || - get_user(karg.list[i].total, &ulist[i].total) || - get_user(karg.list[i].used, &ulist[i].used) || - get_user(tmp1, &ulist[i].address)) - goto out; - - karg.list[i].address = (void *) A(tmp1); - } - - old_fs = get_fs(); - set_fs(KERNEL_DS); - ret = sys_ioctl(fd, DRM_IOCTL_MAP_BUFS, (unsigned long) &karg); - set_fs(old_fs); - - if (!ret) { - for (i = 0; i < orig_count; i++) { - tmp1 = (u32) (long) karg.list[i].address; - if (put_user(karg.list[i].idx, &ulist[i].idx) || - put_user(karg.list[i].total, &ulist[i].total) || - put_user(karg.list[i].used, &ulist[i].used) || - put_user(tmp1, &ulist[i].address)) { - ret = -EFAULT; - goto out; - } - } - if (put_user(karg.count, &uarg->count)) - ret = -EFAULT; - } - -out: - kfree(karg.list); - return ret; -} - -typedef struct drm32_dma { - /* Indices here refer to the offset into - buflist in drm_buf_get_t. */ - int context; /* Context handle */ - int send_count; /* Number of buffers to send */ - u32 send_indices; /* List of handles to buffers (int *) */ - u32 send_sizes; /* Lengths of data to send (int *) */ - drm_dma_flags_t flags; /* Flags */ - int request_count; /* Number of buffers requested */ - int request_size; /* Desired size for buffers */ - u32 request_indices; /* Buffer information (int *) */ - u32 request_sizes; /* (int *) */ - int granted_count; /* Number of buffers granted */ -} drm32_dma_t; -#define DRM32_IOCTL_DMA DRM_IOWR(0x29, drm32_dma_t) - -/* RED PEN The DRM layer blindly dereferences the send/request - * indice/size arrays even though they are userland - * pointers. 
-DaveM - */ -static int drm32_dma(unsigned int fd, unsigned int cmd, unsigned long arg) -{ - drm32_dma_t *uarg = (drm32_dma_t *) arg; - int *u_si, *u_ss, *u_ri, *u_rs; - drm_dma_t karg; - mm_segment_t old_fs; - int ret; - u32 tmp1, tmp2, tmp3, tmp4; - - karg.send_indices = karg.send_sizes = NULL; - karg.request_indices = karg.request_sizes = NULL; - - if (get_user(karg.context, &uarg->context) || - get_user(karg.send_count, &uarg->send_count) || - get_user(tmp1, &uarg->send_indices) || - get_user(tmp2, &uarg->send_sizes) || - get_user(karg.flags, &uarg->flags) || - get_user(karg.request_count, &uarg->request_count) || - get_user(karg.request_size, &uarg->request_size) || - get_user(tmp3, &uarg->request_indices) || - get_user(tmp4, &uarg->request_sizes) || - get_user(karg.granted_count, &uarg->granted_count)) - return -EFAULT; - - u_si = (int *) A(tmp1); - u_ss = (int *) A(tmp2); - u_ri = (int *) A(tmp3); - u_rs = (int *) A(tmp4); - - if (karg.send_count) { - karg.send_indices = kmalloc(karg.send_count * sizeof(int), GFP_KERNEL); - karg.send_sizes = kmalloc(karg.send_count * sizeof(int), GFP_KERNEL); - - ret = -ENOMEM; - if (!karg.send_indices || !karg.send_sizes) - goto out; - - ret = -EFAULT; - if (copy_from_user(karg.send_indices, u_si, - (karg.send_count * sizeof(int))) || - copy_from_user(karg.send_sizes, u_ss, - (karg.send_count * sizeof(int)))) - goto out; - } - - if (karg.request_count) { - karg.request_indices = kmalloc(karg.request_count * sizeof(int), GFP_KERNEL); - karg.request_sizes = kmalloc(karg.request_count * sizeof(int), GFP_KERNEL); - - ret = -ENOMEM; - if (!karg.request_indices || !karg.request_sizes) - goto out; - - ret = -EFAULT; - if (copy_from_user(karg.request_indices, u_ri, - (karg.request_count * sizeof(int))) || - copy_from_user(karg.request_sizes, u_rs, - (karg.request_count * sizeof(int)))) - goto out; - } - - old_fs = get_fs(); - set_fs(KERNEL_DS); - ret = sys_ioctl(fd, DRM_IOCTL_DMA, (unsigned long) &karg); - set_fs(old_fs); - - if (!ret) { - if (put_user(karg.context, &uarg->context) || - put_user(karg.send_count, &uarg->send_count) || - put_user(karg.flags, &uarg->flags) || - put_user(karg.request_count, &uarg->request_count) || - put_user(karg.request_size, &uarg->request_size) || - put_user(karg.granted_count, &uarg->granted_count)) - ret = -EFAULT; - - if (karg.send_count) { - if (copy_to_user(u_si, karg.send_indices, - (karg.send_count * sizeof(int))) || - copy_to_user(u_ss, karg.send_sizes, - (karg.send_count * sizeof(int)))) - ret = -EFAULT; - } - if (karg.request_count) { - if (copy_to_user(u_ri, karg.request_indices, - (karg.request_count * sizeof(int))) || - copy_to_user(u_rs, karg.request_sizes, - (karg.request_count * sizeof(int)))) - ret = -EFAULT; - } - } - -out: - if (karg.send_indices) - kfree(karg.send_indices); - if (karg.send_sizes) - kfree(karg.send_sizes); - if (karg.request_indices) - kfree(karg.request_indices); - if (karg.request_sizes) - kfree(karg.request_sizes); - - return ret; -} - -typedef struct drm32_ctx_res { - int count; - u32 contexts; /* (drm_ctx_t *) */ -} drm32_ctx_res_t; -#define DRM32_IOCTL_RES_CTX DRM_IOWR(0x26, drm32_ctx_res_t) - -static int drm32_res_ctx(unsigned int fd, unsigned int cmd, unsigned long arg) -{ - drm32_ctx_res_t *uarg = (drm32_ctx_res_t *) arg; - drm_ctx_t *ulist; - drm_ctx_res_t karg; - mm_segment_t old_fs; - int orig_count, ret; - u32 tmp; - - karg.contexts = NULL; - if (get_user(karg.count, &uarg->count) || - get_user(tmp, &uarg->contexts)) - return -EFAULT; - - ulist = (drm_ctx_t *) A(tmp); - - 
orig_count = karg.count; - if (karg.count && ulist) { - karg.contexts = kmalloc((karg.count * sizeof(drm_ctx_t)), GFP_KERNEL); - if (!karg.contexts) - return -ENOMEM; - if (copy_from_user(karg.contexts, ulist, - (karg.count * sizeof(drm_ctx_t)))) { - kfree(karg.contexts); - return -EFAULT; - } - } - - old_fs = get_fs(); - set_fs(KERNEL_DS); - ret = sys_ioctl(fd, DRM_IOCTL_RES_CTX, (unsigned long) &karg); - set_fs(old_fs); - - if (!ret) { - if (orig_count) { - if (copy_to_user(ulist, karg.contexts, - (orig_count * sizeof(drm_ctx_t)))) - ret = -EFAULT; - } - if (put_user(karg.count, &uarg->count)) - ret = -EFAULT; - } - - if (karg.contexts) - kfree(karg.contexts); - - return ret; -} - -#endif - #define HANDLE_IOCTL(cmd, handler) { cmd, (ioctl_trans_handler_t)handler, NULL }, #define COMPATIBLE_IOCTL(cmd) HANDLE_IOCTL(cmd, sys_ioctl) @@ -575,16 +31,6 @@ IOCTL_TABLE_START #define DECLARES #include "compat_ioctl.c" -/* Might be moved to compat_ioctl.h with some ifdefs... */ -COMPATIBLE_IOCTL(TIOCSTART) -COMPATIBLE_IOCTL(TIOCSTOP) -COMPATIBLE_IOCTL(TIOCSLTC) - -/* PA-specific ioctls */ -COMPATIBLE_IOCTL(PA_PERF_ON) -COMPATIBLE_IOCTL(PA_PERF_OFF) -COMPATIBLE_IOCTL(PA_PERF_VERSION) - /* And these ioctls need translation */ HANDLE_IOCTL(SIOCGPPPSTATS, dev_ifsioc) HANDLE_IOCTL(SIOCGPPPCSTATS, dev_ifsioc) @@ -609,17 +55,6 @@ HANDLE_IOCTL(RTC_EPOCH_READ, w_long) COMPATIBLE_IOCTL(RTC_EPOCH_SET) #endif -#if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE) -HANDLE_IOCTL(DRM32_IOCTL_VERSION, drm32_version); -HANDLE_IOCTL(DRM32_IOCTL_GET_UNIQUE, drm32_getsetunique); -HANDLE_IOCTL(DRM32_IOCTL_SET_UNIQUE, drm32_getsetunique); -HANDLE_IOCTL(DRM32_IOCTL_ADD_MAP, drm32_addmap); -HANDLE_IOCTL(DRM32_IOCTL_INFO_BUFS, drm32_info_bufs); -HANDLE_IOCTL(DRM32_IOCTL_FREE_BUFS, drm32_free_bufs); -HANDLE_IOCTL(DRM32_IOCTL_MAP_BUFS, drm32_map_bufs); -HANDLE_IOCTL(DRM32_IOCTL_DMA, drm32_dma); -HANDLE_IOCTL(DRM32_IOCTL_RES_CTX, drm32_res_ctx); -#endif /* DRM */ IOCTL_TABLE_END int ioctl_table_size = ARRAY_SIZE(ioctl_start); diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c index 006385dbee66..197936d9359a 100644 --- a/arch/parisc/kernel/irq.c +++ b/arch/parisc/kernel/irq.c @@ -30,6 +30,9 @@ #include <linux/seq_file.h> #include <linux/spinlock.h> #include <linux/types.h> +#include <asm/io.h> + +#include <asm/smp.h> #undef PARISC_IRQ_CR16_COUNTS @@ -43,26 +46,34 @@ extern irqreturn_t ipi_interrupt(int, void *, struct pt_regs *); */ static volatile unsigned long cpu_eiem = 0; -static void cpu_set_eiem(void *info) -{ - set_eiem((unsigned long) info); -} - -static inline void cpu_disable_irq(unsigned int irq) +static void cpu_disable_irq(unsigned int irq) { unsigned long eirr_bit = EIEM_MASK(irq); cpu_eiem &= ~eirr_bit; - on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1); + /* Do nothing on the other CPUs. If they get this interrupt, + * The & cpu_eiem in the do_cpu_irq_mask() ensures they won't + * handle it, and the set_eiem() at the bottom will ensure it + * then gets disabled */ } static void cpu_enable_irq(unsigned int irq) { unsigned long eirr_bit = EIEM_MASK(irq); - mtctl(eirr_bit, 23); /* clear EIRR bit before unmasking */ cpu_eiem |= eirr_bit; - on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1); + + /* FIXME: while our interrupts aren't nested, we cannot reset + * the eiem mask if we're already in an interrupt. Once we + * implement nested interrupts, this can go away + */ + if (!in_interrupt()) + set_eiem(cpu_eiem); + + /* This is just a simple NOP IPI. 
But what it does is cause + * all the other CPUs to do a set_eiem(cpu_eiem) at the end + * of the interrupt handler */ + smp_send_all_nop(); } static unsigned int cpu_startup_irq(unsigned int irq) @@ -74,6 +85,35 @@ static unsigned int cpu_startup_irq(unsigned int irq) void no_ack_irq(unsigned int irq) { } void no_end_irq(unsigned int irq) { } +#ifdef CONFIG_SMP +int cpu_check_affinity(unsigned int irq, cpumask_t *dest) +{ + int cpu_dest; + + /* timer and ipi have to always be received on all CPUs */ + if (irq == TIMER_IRQ || irq == IPI_IRQ) { + /* Bad linux design decision. The mask has already + * been set; we must reset it */ + irq_affinity[irq] = CPU_MASK_ALL; + return -EINVAL; + } + + /* whatever mask they set, we just allow one CPU */ + cpu_dest = first_cpu(*dest); + *dest = cpumask_of_cpu(cpu_dest); + + return 0; +} + +static void cpu_set_affinity_irq(unsigned int irq, cpumask_t dest) +{ + if (cpu_check_affinity(irq, &dest)) + return; + + irq_affinity[irq] = dest; +} +#endif + static struct hw_interrupt_type cpu_interrupt_type = { .typename = "CPU", .startup = cpu_startup_irq, @@ -82,7 +122,9 @@ static struct hw_interrupt_type cpu_interrupt_type = { .disable = cpu_disable_irq, .ack = no_ack_irq, .end = no_end_irq, -// .set_affinity = cpu_set_affinity_irq, +#ifdef CONFIG_SMP + .set_affinity = cpu_set_affinity_irq, +#endif }; int show_interrupts(struct seq_file *p, void *v) @@ -219,6 +261,17 @@ int txn_alloc_irq(unsigned int bits_wide) return -1; } + +unsigned long txn_affinity_addr(unsigned int irq, int cpu) +{ +#ifdef CONFIG_SMP + irq_affinity[irq] = cpumask_of_cpu(cpu); +#endif + + return cpu_data[cpu].txn_addr; +} + + unsigned long txn_alloc_addr(unsigned int virt_irq) { static int next_cpu = -1; @@ -233,7 +286,7 @@ unsigned long txn_alloc_addr(unsigned int virt_irq) if (next_cpu >= NR_CPUS) next_cpu = 0; /* nothing else, assign monarch */ - return cpu_data[next_cpu].txn_addr; + return txn_affinity_addr(virt_irq, next_cpu); } @@ -250,10 +303,11 @@ void do_cpu_irq_mask(struct pt_regs *regs) irq_enter(); /* - * Only allow interrupt processing to be interrupted by the - * timer tick + * Don't allow TIMER or IPI nested interrupts. + * Allowing any single interrupt to nest can lead to that CPU + * handling interrupts with all enabled interrupts unmasked. 
*/ - set_eiem(EIEM_MASK(TIMER_IRQ)); + set_eiem(0UL); /* 1) only process IRQs that are enabled/unmasked (cpu_eiem) * 2) We loop here on EIRR contents in order to avoid @@ -267,23 +321,41 @@ void do_cpu_irq_mask(struct pt_regs *regs) if (!eirr_val) break; - if (eirr_val & EIEM_MASK(TIMER_IRQ)) - set_eiem(0); - mtctl(eirr_val, 23); /* reset bits we are going to process */ /* Work our way from MSb to LSb...same order we alloc EIRs */ for (irq = TIMER_IRQ; eirr_val && bit; bit>>=1, irq++) { +#ifdef CONFIG_SMP + cpumask_t dest = irq_affinity[irq]; +#endif if (!(bit & eirr_val)) continue; /* clear bit in mask - can exit loop sooner */ eirr_val &= ~bit; +#ifdef CONFIG_SMP + /* FIXME: because generic set affinity mucks + * with the affinity before sending it to us + * we can get the situation where the affinity is + * wrong for our CPU type interrupts */ + if (irq != TIMER_IRQ && irq != IPI_IRQ && + !cpu_isset(smp_processor_id(), dest)) { + int cpu = first_cpu(dest); + + printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n", + irq, smp_processor_id(), cpu); + gsc_writel(irq + CPU_IRQ_BASE, + cpu_data[cpu].hpa); + continue; + } +#endif + __do_IRQ(irq, regs); } } - set_eiem(cpu_eiem); + + set_eiem(cpu_eiem); /* restore original mask */ irq_exit(); } @@ -291,12 +363,14 @@ void do_cpu_irq_mask(struct pt_regs *regs) static struct irqaction timer_action = { .handler = timer_interrupt, .name = "timer", + .flags = SA_INTERRUPT, }; #ifdef CONFIG_SMP static struct irqaction ipi_action = { .handler = ipi_interrupt, .name = "IPI", + .flags = SA_INTERRUPT, }; #endif diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S index 77e03bc0f935..9534ee17b9be 100644 --- a/arch/parisc/kernel/pacache.S +++ b/arch/parisc/kernel/pacache.S @@ -26,7 +26,7 @@ * can be used. */ -#ifdef __LP64__ +#ifdef CONFIG_64BIT #define ADDIB addib,* #define CMPB cmpb,* #define ANDCM andcm,* @@ -40,8 +40,10 @@ .level 2.0 #endif -#include <asm/assembly.h> +#include <linux/config.h> + #include <asm/psw.h> +#include <asm/assembly.h> #include <asm/pgtable.h> #include <asm/cache.h> @@ -62,32 +64,23 @@ flush_tlb_all_local: * to happen in real mode with all interruptions disabled. */ - /* - * Once again, we do the rfi dance ... some day we need examine - * all of our uses of this type of code and see what can be - * consolidated. - */ - - rsm PSW_SM_I, %r19 /* relied upon translation! PA 2.0 Arch. F-5 */ + /* pcxt_ssm_bug - relied upon translation! PA 2.0 Arch. 
F-4 and F-5 */ + rsm PSW_SM_I, %r19 /* save I-bit state */ + load32 PA(1f), %r1 nop nop nop nop nop - nop - nop - - rsm PSW_SM_Q, %r0 /* Turn off Q bit to load iia queue */ - ldil L%REAL_MODE_PSW, %r1 - ldo R%REAL_MODE_PSW(%r1), %r1 - mtctl %r1, %cr22 + + rsm PSW_SM_Q, %r0 /* prep to load iia queue */ mtctl %r0, %cr17 /* Clear IIASQ tail */ mtctl %r0, %cr17 /* Clear IIASQ head */ - ldil L%PA(1f), %r1 - ldo R%PA(1f)(%r1), %r1 mtctl %r1, %cr18 /* IIAOQ head */ ldo 4(%r1), %r1 mtctl %r1, %cr18 /* IIAOQ tail */ + load32 REAL_MODE_PSW, %r1 + mtctl %r1, %ipsw rfi nop @@ -178,29 +171,36 @@ fdtonemiddle: /* Loop if LOOP = 1 */ ADDIB> -1, %r22, fdtoneloop /* Outer loop count decr */ add %r21, %r20, %r20 /* increment space */ -fdtdone: - /* Switch back to virtual mode */ +fdtdone: + /* + * Switch back to virtual mode + */ + /* pcxt_ssm_bug */ + rsm PSW_SM_I, %r0 + load32 2f, %r1 + nop + nop + nop + nop + nop - rsm PSW_SM_Q, %r0 /* clear Q bit to load iia queue */ - ldil L%KERNEL_PSW, %r1 - ldo R%KERNEL_PSW(%r1), %r1 - or %r1, %r19, %r1 /* Set I bit if set on entry */ - mtctl %r1, %cr22 + rsm PSW_SM_Q, %r0 /* prep to load iia queue */ mtctl %r0, %cr17 /* Clear IIASQ tail */ mtctl %r0, %cr17 /* Clear IIASQ head */ - ldil L%(2f), %r1 - ldo R%(2f)(%r1), %r1 mtctl %r1, %cr18 /* IIAOQ head */ ldo 4(%r1), %r1 mtctl %r1, %cr18 /* IIAOQ tail */ + load32 KERNEL_PSW, %r1 + or %r1, %r19, %r1 /* I-bit to state on entry */ + mtctl %r1, %ipsw /* restore I-bit (entire PSW) */ rfi nop 2: bv %r0(%r2) nop - .exit + .exit .procend .export flush_instruction_cache_local,code @@ -227,7 +227,7 @@ flush_instruction_cache_local: fimanyloop: /* Loop if LOOP >= 2 */ ADDIB> -1, %r31, fimanyloop /* Adjusted inner loop decr */ - fice 0(%sr1, %arg0) + fice %r0(%sr1, %arg0) fice,m %arg1(%sr1, %arg0) /* Last fice and addr adjust */ movb,tr %arg3, %r31, fimanyloop /* Re-init inner loop count */ ADDIB<=,n -1, %arg2, fisync /* Outer loop decr */ @@ -238,7 +238,7 @@ fioneloop: /* Loop if LOOP = 1 */ fisync: sync - mtsm %r22 + mtsm %r22 /* restore I-bit */ bv %r0(%r2) nop .exit @@ -269,7 +269,7 @@ flush_data_cache_local: fdmanyloop: /* Loop if LOOP >= 2 */ ADDIB> -1, %r31, fdmanyloop /* Adjusted inner loop decr */ - fdce 0(%sr1, %arg0) + fdce %r0(%sr1, %arg0) fdce,m %arg1(%sr1, %arg0) /* Last fdce and addr adjust */ movb,tr %arg3, %r31, fdmanyloop /* Re-init inner loop count */ ADDIB<=,n -1, %arg2, fdsync /* Outer loop decr */ @@ -281,7 +281,7 @@ fdoneloop: /* Loop if LOOP = 1 */ fdsync: syncdma sync - mtsm %r22 + mtsm %r22 /* restore I-bit */ bv %r0(%r2) nop .exit @@ -296,7 +296,7 @@ copy_user_page_asm: .callinfo NO_CALLS .entry -#ifdef __LP64__ +#ifdef CONFIG_64BIT /* PA8x00 CPUs can consume 2 loads or 1 store per cycle. * Unroll the loop by hand and arrange insn appropriately. * GCC probably can do this just as well. @@ -351,7 +351,11 @@ copy_user_page_asm: std %r22, 120(%r26) ldo 128(%r26), %r26 - ADDIB> -1, %r1, 1b /* bundle 10 */ + /* conditional branches nullify on forward taken branch, and on + * non-taken backward branch. Note that .+4 is a backwards branch. + * The ldd should only get executed if the branch is taken. + */ + ADDIB>,n -1, %r1, 1b /* bundle 10 */ ldd 0(%r25), %r19 /* start next loads */ #else @@ -363,10 +367,10 @@ copy_user_page_asm: * the full 64 bit register values on interrupt, we can't * use ldd/std on a 32 bit kernel. 
*/ + ldw 0(%r25), %r19 ldi 64, %r1 /* PAGE_SIZE/64 == 64 */ 1: - ldw 0(%r25), %r19 ldw 4(%r25), %r20 ldw 8(%r25), %r21 ldw 12(%r25), %r22 @@ -396,11 +400,12 @@ copy_user_page_asm: ldw 60(%r25), %r22 stw %r19, 48(%r26) stw %r20, 52(%r26) + ldo 64(%r25), %r25 stw %r21, 56(%r26) stw %r22, 60(%r26) ldo 64(%r26), %r26 - ADDIB> -1, %r1, 1b - ldo 64(%r25), %r25 + ADDIB>,n -1, %r1, 1b + ldw 0(%r25), %r19 #endif bv %r0(%r2) nop @@ -456,7 +461,7 @@ copy_user_page_asm: sub %r25, %r1, %r23 /* move physical addr into non shadowed reg */ ldil L%(TMPALIAS_MAP_START), %r28 -#ifdef __LP64__ +#ifdef CONFIG_64BIT extrd,u %r26,56,32, %r26 /* convert phys addr to tlb insert format */ extrd,u %r23,56,32, %r23 /* convert phys addr to tlb insert format */ depd %r24,63,22, %r28 /* Form aliased virtual address 'to' */ @@ -543,7 +548,7 @@ __clear_user_page_asm: tophys_r1 %r26 ldil L%(TMPALIAS_MAP_START), %r28 -#ifdef __LP64__ +#ifdef CONFIG_64BIT #if (TMPALIAS_MAP_START >= 0x80000000) depdi 0, 31,32, %r28 /* clear any sign extension */ #endif @@ -560,7 +565,7 @@ __clear_user_page_asm: pdtlb 0(%r28) -#ifdef __LP64__ +#ifdef CONFIG_64BIT ldi 32, %r1 /* PAGE_SIZE/128 == 32 */ /* PREFETCH (Write) has not (yet) been proven to help here */ @@ -585,7 +590,7 @@ __clear_user_page_asm: ADDIB> -1, %r1, 1b ldo 128(%r28), %r28 -#else /* ! __LP64 */ +#else /* ! CONFIG_64BIT */ ldi 64, %r1 /* PAGE_SIZE/64 == 64 */ @@ -608,7 +613,7 @@ __clear_user_page_asm: stw %r0, 60(%r28) ADDIB> -1, %r1, 1b ldo 64(%r28), %r28 -#endif /* __LP64 */ +#endif /* CONFIG_64BIT */ bv %r0(%r2) nop @@ -626,7 +631,7 @@ flush_kernel_dcache_page: ldil L%dcache_stride, %r1 ldw R%dcache_stride(%r1), %r23 -#ifdef __LP64__ +#ifdef CONFIG_64BIT depdi,z 1, 63-PAGE_SHIFT,1, %r25 #else depwi,z 1, 31-PAGE_SHIFT,1, %r25 @@ -670,7 +675,7 @@ flush_user_dcache_page: ldil L%dcache_stride, %r1 ldw R%dcache_stride(%r1), %r23 -#ifdef __LP64__ +#ifdef CONFIG_64BIT depdi,z 1,63-PAGE_SHIFT,1, %r25 #else depwi,z 1,31-PAGE_SHIFT,1, %r25 @@ -714,7 +719,7 @@ flush_user_icache_page: ldil L%dcache_stride, %r1 ldw R%dcache_stride(%r1), %r23 -#ifdef __LP64__ +#ifdef CONFIG_64BIT depdi,z 1, 63-PAGE_SHIFT,1, %r25 #else depwi,z 1, 31-PAGE_SHIFT,1, %r25 @@ -759,7 +764,7 @@ purge_kernel_dcache_page: ldil L%dcache_stride, %r1 ldw R%dcache_stride(%r1), %r23 -#ifdef __LP64__ +#ifdef CONFIG_64BIT depdi,z 1, 63-PAGE_SHIFT,1, %r25 #else depwi,z 1, 31-PAGE_SHIFT,1, %r25 @@ -807,7 +812,7 @@ flush_alias_page: tophys_r1 %r26 ldil L%(TMPALIAS_MAP_START), %r28 -#ifdef __LP64__ +#ifdef CONFIG_64BIT extrd,u %r26, 56,32, %r26 /* convert phys addr to tlb insert format */ depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */ depdi 0, 63,12, %r28 /* Clear any offset bits */ @@ -824,7 +829,7 @@ flush_alias_page: ldil L%dcache_stride, %r1 ldw R%dcache_stride(%r1), %r23 -#ifdef __LP64__ +#ifdef CONFIG_64BIT depdi,z 1, 63-PAGE_SHIFT,1, %r29 #else depwi,z 1, 31-PAGE_SHIFT,1, %r29 @@ -935,7 +940,7 @@ flush_kernel_icache_page: ldil L%icache_stride, %r1 ldw R%icache_stride(%r1), %r23 -#ifdef __LP64__ +#ifdef CONFIG_64BIT depdi,z 1, 63-PAGE_SHIFT,1, %r25 #else depwi,z 1, 31-PAGE_SHIFT,1, %r25 @@ -944,23 +949,23 @@ flush_kernel_icache_page: sub %r25, %r23, %r25 -1: fic,m %r23(%r26) - fic,m %r23(%r26) - fic,m %r23(%r26) - fic,m %r23(%r26) - fic,m %r23(%r26) - fic,m %r23(%r26) - fic,m %r23(%r26) - fic,m %r23(%r26) - fic,m %r23(%r26) - fic,m %r23(%r26) - fic,m %r23(%r26) - fic,m %r23(%r26) - fic,m %r23(%r26) - fic,m %r23(%r26) - fic,m %r23(%r26) +1: fic,m %r23(%sr4, %r26) + fic,m %r23(%sr4, %r26) + fic,m 
%r23(%sr4, %r26) + fic,m %r23(%sr4, %r26) + fic,m %r23(%sr4, %r26) + fic,m %r23(%sr4, %r26) + fic,m %r23(%sr4, %r26) + fic,m %r23(%sr4, %r26) + fic,m %r23(%sr4, %r26) + fic,m %r23(%sr4, %r26) + fic,m %r23(%sr4, %r26) + fic,m %r23(%sr4, %r26) + fic,m %r23(%sr4, %r26) + fic,m %r23(%sr4, %r26) + fic,m %r23(%sr4, %r26) CMPB<< %r26, %r25, 1b - fic,m %r23(%r26) + fic,m %r23(%sr4, %r26) sync bv %r0(%r2) @@ -982,17 +987,18 @@ flush_kernel_icache_range_asm: ANDCM %r26, %r21, %r26 1: CMPB<<,n %r26, %r25, 1b - fic,m %r23(%r26) + fic,m %r23(%sr4, %r26) sync bv %r0(%r2) nop .exit - .procend - .align 128 - + /* align should cover use of rfi in disable_sr_hashing_asm and + * srdis_done. + */ + .align 256 .export disable_sr_hashing_asm,code disable_sr_hashing_asm: @@ -1000,28 +1006,26 @@ disable_sr_hashing_asm: .callinfo NO_CALLS .entry - /* Switch to real mode */ - - ssm 0, %r0 /* relied upon translation! */ - nop - nop + /* + * Switch to real mode + */ + /* pcxt_ssm_bug */ + rsm PSW_SM_I, %r0 + load32 PA(1f), %r1 nop nop nop nop nop - - rsm (PSW_SM_Q|PSW_SM_I), %r0 /* disable Q&I to load the iia queue */ - ldil L%REAL_MODE_PSW, %r1 - ldo R%REAL_MODE_PSW(%r1), %r1 - mtctl %r1, %cr22 + + rsm PSW_SM_Q, %r0 /* prep to load iia queue */ mtctl %r0, %cr17 /* Clear IIASQ tail */ mtctl %r0, %cr17 /* Clear IIASQ head */ - ldil L%PA(1f), %r1 - ldo R%PA(1f)(%r1), %r1 mtctl %r1, %cr18 /* IIAOQ head */ ldo 4(%r1), %r1 mtctl %r1, %cr18 /* IIAOQ tail */ + load32 REAL_MODE_PSW, %r1 + mtctl %r1, %ipsw rfi nop @@ -1053,27 +1057,31 @@ srdis_pcxl: srdis_pa20: - /* Disable Space Register Hashing for PCXU,PCXU+,PCXW,PCXW+ */ + /* Disable Space Register Hashing for PCXU,PCXU+,PCXW,PCXW+,PCXW2 */ .word 0x144008bc /* mfdiag %dr2, %r28 */ depdi 0, 54,1, %r28 /* clear DIAG_SPHASH_ENAB (bit 54) */ .word 0x145c1840 /* mtdiag %r28, %dr2 */ -srdis_done: +srdis_done: /* Switch back to virtual mode */ + rsm PSW_SM_I, %r0 /* prep to load iia queue */ + load32 2f, %r1 + nop + nop + nop + nop + nop - rsm PSW_SM_Q, %r0 /* clear Q bit to load iia queue */ - ldil L%KERNEL_PSW, %r1 - ldo R%KERNEL_PSW(%r1), %r1 - mtctl %r1, %cr22 + rsm PSW_SM_Q, %r0 /* prep to load iia queue */ mtctl %r0, %cr17 /* Clear IIASQ tail */ mtctl %r0, %cr17 /* Clear IIASQ head */ - ldil L%(2f), %r1 - ldo R%(2f)(%r1), %r1 mtctl %r1, %cr18 /* IIAOQ head */ ldo 4(%r1), %r1 mtctl %r1, %cr18 /* IIAOQ tail */ + load32 KERNEL_PSW, %r1 + mtctl %r1, %ipsw rfi nop diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c index 368cc095c99f..f94a02ef3d95 100644 --- a/arch/parisc/kernel/pci-dma.c +++ b/arch/parisc/kernel/pci-dma.c @@ -31,7 +31,7 @@ #include <asm/page.h> /* get_order */ #include <asm/pgalloc.h> #include <asm/uaccess.h> - +#include <asm/tlbflush.h> /* for purge_tlb_*() macros */ static struct proc_dir_entry * proc_gsc_root = NULL; static int pcxl_proc_info(char *buffer, char **start, off_t offset, int length); @@ -114,7 +114,7 @@ static inline int map_pmd_uncached(pmd_t * pmd, unsigned long vaddr, if (end > PGDIR_SIZE) end = PGDIR_SIZE; do { - pte_t * pte = pte_alloc_kernel(&init_mm, pmd, vaddr); + pte_t * pte = pte_alloc_kernel(pmd, vaddr); if (!pte) return -ENOMEM; if (map_pte_uncached(pte, orig_vaddr, end - vaddr, paddr_ptr)) @@ -333,23 +333,33 @@ pcxl_free_range(unsigned long vaddr, size_t size) static int __init pcxl_dma_init(void) { - if (pcxl_dma_start == 0) - return 0; + if (pcxl_dma_start == 0) + return 0; - spin_lock_init(&pcxl_res_lock); - pcxl_res_size = PCXL_DMA_MAP_SIZE >> (PAGE_SHIFT + 3); - pcxl_res_hint = 0; - pcxl_res_map = (char 
*)__get_free_pages(GFP_KERNEL, + spin_lock_init(&pcxl_res_lock); + pcxl_res_size = PCXL_DMA_MAP_SIZE >> (PAGE_SHIFT + 3); + pcxl_res_hint = 0; + pcxl_res_map = (char *)__get_free_pages(GFP_KERNEL, get_order(pcxl_res_size)); - memset(pcxl_res_map, 0, pcxl_res_size); - proc_gsc_root = proc_mkdir("gsc", 0); - create_proc_info_entry("dino", 0, proc_gsc_root, pcxl_proc_info); - return 0; + memset(pcxl_res_map, 0, pcxl_res_size); + proc_gsc_root = proc_mkdir("gsc", 0); + if (!proc_gsc_root) + printk(KERN_WARNING + "pcxl_dma_init: Unable to create gsc /proc dir entry\n"); + else { + struct proc_dir_entry* ent; + ent = create_proc_info_entry("pcxl_dma", 0, + proc_gsc_root, pcxl_proc_info); + if (!ent) + printk(KERN_WARNING + "pci-dma.c: Unable to create pcxl_dma /proc entry.\n"); + } + return 0; } __initcall(pcxl_dma_init); -static void * pa11_dma_alloc_consistent (struct device *dev, size_t size, dma_addr_t *dma_handle, int flag) +static void * pa11_dma_alloc_consistent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag) { unsigned long vaddr; unsigned long paddr; @@ -502,13 +512,13 @@ struct hppa_dma_ops pcxl_dma_ops = { }; static void *fail_alloc_consistent(struct device *dev, size_t size, - dma_addr_t *dma_handle, int flag) + dma_addr_t *dma_handle, gfp_t flag) { return NULL; } static void *pa11_dma_alloc_noncoherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, int flag) + dma_addr_t *dma_handle, gfp_t flag) { void *addr = NULL; @@ -545,16 +555,16 @@ struct hppa_dma_ops pcx_dma_ops = { static int pcxl_proc_info(char *buf, char **start, off_t offset, int len) { +#if 0 u_long i = 0; unsigned long *res_ptr = (u_long *)pcxl_res_map; - unsigned long total_pages = pcxl_res_size << 3; /* 8 bits per byte */ +#endif + unsigned long total_pages = pcxl_res_size << 3; /* 8 bits per byte */ - sprintf(buf, "\nDMA Mapping Area size : %d bytes (%d pages)\n", - PCXL_DMA_MAP_SIZE, - (pcxl_res_size << 3) ); /* 1 bit per page */ + sprintf(buf, "\nDMA Mapping Area size : %d bytes (%ld pages)\n", + PCXL_DMA_MAP_SIZE, total_pages); - sprintf(buf, "%sResource bitmap : %d bytes (%d pages)\n", - buf, pcxl_res_size, pcxl_res_size << 3); /* 8 bits per byte */ + sprintf(buf, "%sResource bitmap : %d bytes\n", buf, pcxl_res_size); strcat(buf, " total: free: used: % used:\n"); sprintf(buf, "%sblocks %8d %8ld %8ld %8ld%%\n", buf, pcxl_res_size, @@ -564,7 +574,8 @@ static int pcxl_proc_info(char *buf, char **start, off_t offset, int len) sprintf(buf, "%spages %8ld %8ld %8ld %8ld%%\n", buf, total_pages, total_pages - pcxl_used_pages, pcxl_used_pages, (pcxl_used_pages * 100 / total_pages)); - + +#if 0 strcat(buf, "\nResource bitmap:"); for(; i < (pcxl_res_size / sizeof(u_long)); ++i, ++res_ptr) { @@ -572,6 +583,7 @@ static int pcxl_proc_info(char *buf, char **start, off_t offset, int len) strcat(buf,"\n "); sprintf(buf, "%s %08lx", buf, *res_ptr); } +#endif strcat(buf, "\n"); return strlen(buf); } diff --git a/arch/parisc/kernel/pci.c b/arch/parisc/kernel/pci.c index e6a891a0cad0..88cba49c5301 100644 --- a/arch/parisc/kernel/pci.c +++ b/arch/parisc/kernel/pci.c @@ -202,7 +202,8 @@ static void pcibios_link_hba_resources( struct resource *hba_res, struct resource *r) { if (!r->parent) { - printk(KERN_EMERG "PCI: Tell willy he's wrong\n"); + printk(KERN_EMERG "PCI: resource not parented! 
[%lx-%lx]\n", + r->start, r->end); r->parent = hba_res; /* reverse link is harder *sigh* */ diff --git a/arch/parisc/kernel/pdc_cons.c b/arch/parisc/kernel/pdc_cons.c index 01f676d1673b..215d78c87bc5 100644 --- a/arch/parisc/kernel/pdc_cons.c +++ b/arch/parisc/kernel/pdc_cons.c @@ -41,7 +41,7 @@ /* Define EARLY_BOOTUP_DEBUG to debug kernel related boot problems. * On production kernels EARLY_BOOTUP_DEBUG should be undefined. */ -#undef EARLY_BOOTUP_DEBUG +#define EARLY_BOOTUP_DEBUG #include <linux/config.h> @@ -49,14 +49,8 @@ #include <linux/console.h> #include <linux/string.h> #include <linux/init.h> -#include <linux/delay.h> -#include <linux/sched.h> -#include <linux/interrupt.h> #include <linux/major.h> #include <linux/tty.h> -#include <asm/page.h> -#include <asm/types.h> -#include <asm/system.h> #include <asm/pdc.h> /* for iodc_call() proto and friends */ @@ -96,7 +90,6 @@ static int pdc_console_setup(struct console *co, char *options) } #if defined(CONFIG_PDC_CONSOLE) -#define PDC_CONSOLE_DEVICE pdc_console_device static struct tty_driver * pdc_console_device (struct console *c, int *index) { extern struct tty_driver console_driver; @@ -104,22 +97,19 @@ static struct tty_driver * pdc_console_device (struct console *c, int *index) return &console_driver; } #else -#define PDC_CONSOLE_DEVICE NULL +#define pdc_console_device NULL #endif static struct console pdc_cons = { .name = "ttyB", .write = pdc_console_write, - .device = PDC_CONSOLE_DEVICE, + .device = pdc_console_device, .setup = pdc_console_setup, - .flags = CON_BOOT|CON_PRINTBUFFER|CON_ENABLED, + .flags = CON_BOOT | CON_PRINTBUFFER | CON_ENABLED, .index = -1, }; static int pdc_console_initialized; -extern unsigned long con_start; /* kernel/printk.c */ -extern unsigned long log_end; /* kernel/printk.c */ - static void pdc_console_init_force(void) { @@ -146,27 +136,11 @@ void __init pdc_console_init(void) } -/* Unregister the pdc console with the printk console layer */ -void pdc_console_die(void) -{ - if (!pdc_console_initialized) - return; - --pdc_console_initialized; - - printk(KERN_INFO "Switching from PDC console\n"); - - /* Don't repeat what we've already printed */ - con_start = log_end; - - unregister_console(&pdc_cons); -} - - /* * Used for emergencies. Currently only used if an HPMC occurs. If an * HPMC occurs, it is possible that the current console may not be - * properly initialed after the PDC IO reset. This routine unregisters all - * of the current consoles, reinitializes the pdc console and + * properly initialised after the PDC IO reset. This routine unregisters + * all of the current consoles, reinitializes the pdc console and * registers it. 
*/ @@ -177,13 +151,13 @@ void pdc_console_restart(void) if (pdc_console_initialized) return; + /* If we've already seen the output, don't bother to print it again */ + if (console_drivers != NULL) + pdc_cons.flags &= ~CON_PRINTBUFFER; + while ((console = console_drivers) != NULL) unregister_console(console_drivers); - /* Don't repeat what we've already printed */ - con_start = log_end; - /* force registering the pdc console */ pdc_console_init_force(); } - diff --git a/arch/parisc/kernel/perf.c b/arch/parisc/kernel/perf.c index b3ad0a505b87..f6fec62b6a2f 100644 --- a/arch/parisc/kernel/perf.c +++ b/arch/parisc/kernel/perf.c @@ -196,8 +196,7 @@ static int perf_open(struct inode *inode, struct file *file); static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos); static ssize_t perf_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos); -static int perf_ioctl(struct inode *inode, struct file *file, unsigned int cmd, - unsigned long arg); +static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg); static void perf_start_counters(void); static int perf_stop_counters(uint32_t *raddr); static struct rdr_tbl_ent * perf_rdr_get_entry(uint32_t rdr_num); @@ -438,48 +437,56 @@ static void perf_patch_images(void) * must be running on the processor that you wish to change. */ -static int perf_ioctl(struct inode *inode, struct file *file, unsigned int cmd, - unsigned long arg) +static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { long error_start; - uint32_t raddr[4]; + uint32_t raddr[4]; + int error = 0; + lock_kernel(); switch (cmd) { case PA_PERF_ON: /* Start the counters */ perf_start_counters(); - return 0; + break; case PA_PERF_OFF: error_start = perf_stop_counters(raddr); if (error_start != 0) { printk(KERN_ERR "perf_off: perf_stop_counters = %ld\n", error_start); - return -EFAULT; + error = -EFAULT; + break; } /* copy out the Counters */ if (copy_to_user((void __user *)arg, raddr, sizeof (raddr)) != 0) { - return -EFAULT; + error = -EFAULT; + break; } - return 0; + break; case PA_PERF_VERSION: /* Return the version # */ - return put_user(PERF_VERSION, (int *)arg); + error = put_user(PERF_VERSION, (int *)arg); + break; default: - break; + error = -ENOTTY; } - return -ENOTTY; + + unlock_kernel(); + + return error; } static struct file_operations perf_fops = { .llseek = no_llseek, .read = perf_read, .write = perf_write, - .ioctl = perf_ioctl, + .unlocked_ioctl = perf_ioctl, + .compat_ioctl = perf_ioctl, .open = perf_open, .release = perf_release }; @@ -746,7 +753,8 @@ static int perf_write_image(uint64_t *memaddr) uint64_t *bptr; uint32_t dwords; uint32_t *intrigue_rdr; - uint64_t *intrigue_bitmask, tmp64, proc_hpa; + uint64_t *intrigue_bitmask, tmp64; + void __iomem *runway; struct rdr_tbl_ent *tentry; int i; @@ -798,15 +806,16 @@ static int perf_write_image(uint64_t *memaddr) return -1; } - proc_hpa = cpu_device->hpa; + runway = ioremap(cpu_device->hpa.start, 4096); /* Merge intrigue bits into Runway STATUS 0 */ - tmp64 = __raw_readq(proc_hpa + RUNWAY_STATUS) & 0xffecfffffffffffful; - __raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul), proc_hpa + RUNWAY_STATUS); + tmp64 = __raw_readq(runway + RUNWAY_STATUS) & 0xffecfffffffffffful; + __raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul), + runway + RUNWAY_STATUS); /* Write RUNWAY DEBUG registers */ for (i = 0; i < 8; i++) { - __raw_writeq(*memaddr++, proc_hpa + RUNWAY_DEBUG + i); + __raw_writeq(*memaddr++, runway + RUNWAY_DEBUG); } 
return 0; diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index 46b759385115..fee4f1f09adc 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c @@ -9,7 +9,7 @@ * Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org> * Copyright (C) 2000 Philipp Rumpf <prumpf with tux.org> * Copyright (C) 2000 David Kennedy <dkennedy with linuxcare.com> - * Copyright (C) 2000 Richard Hirst <rhirst with parisc-lixux.org> + * Copyright (C) 2000 Richard Hirst <rhirst with parisc-linux.org> * Copyright (C) 2000 Grant Grundler <grundler with parisc-linux.org> * Copyright (C) 2001 Alan Modra <amodra at parisc-linux.org> * Copyright (C) 2001-2002 Ryan Bradetich <rbrad at parisc-linux.org> @@ -88,11 +88,15 @@ void default_idle(void) */ void cpu_idle(void) { + set_thread_flag(TIF_POLLING_NRFLAG); + /* endless idle loop with no priority at all */ while (1) { while (!need_resched()) barrier(); + preempt_enable_no_resched(); schedule(); + preempt_disable(); check_pgt_cache(); } } @@ -245,7 +249,17 @@ int sys_clone(unsigned long clone_flags, unsigned long usp, struct pt_regs *regs) { - int __user *user_tid = (int __user *)regs->gr[26]; + /* Arugments from userspace are: + r26 = Clone flags. + r25 = Child stack. + r24 = parent_tidptr. + r23 = Is the TLS storage descriptor + r22 = child_tidptr + + However, these last 3 args are only examined + if the proper flags are set. */ + int __user *child_tidptr; + int __user *parent_tidptr; /* usp must be word aligned. This also prevents users from * passing in the value 1 (which is the signal for a special @@ -253,10 +267,20 @@ sys_clone(unsigned long clone_flags, unsigned long usp, usp = ALIGN(usp, 4); /* A zero value for usp means use the current stack */ - if(usp == 0) - usp = regs->gr[30]; + if (usp == 0) + usp = regs->gr[30]; + + if (clone_flags & CLONE_PARENT_SETTID) + parent_tidptr = (int __user *)regs->gr[24]; + else + parent_tidptr = NULL; + + if (clone_flags & (CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)) + child_tidptr = (int __user *)regs->gr[22]; + else + child_tidptr = NULL; - return do_fork(clone_flags, usp, regs, 0, user_tid, NULL); + return do_fork(clone_flags, usp, regs, 0, parent_tidptr, child_tidptr); } int @@ -332,6 +356,10 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp, } else { cregs->kpc = (unsigned long) &child_return; } + /* Setup thread TLS area from the 4th parameter in clone */ + if (clone_flags & CLONE_SETTLS) + cregs->cr27 = pregs->gr[23]; + } return 0; diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c index 13b721cb9f55..4f5bbcf1f5a4 100644 --- a/arch/parisc/kernel/processor.c +++ b/arch/parisc/kernel/processor.c @@ -92,7 +92,7 @@ static int __init processor_probe(struct parisc_device *dev) * May get overwritten by PAT code. */ cpuid = boot_cpu_data.cpu_count; - txn_addr = dev->hpa; /* for legacy PDC */ + txn_addr = dev->hpa.start; /* for legacy PDC */ #ifdef __LP64__ if (is_pdc_pat()) { @@ -122,7 +122,7 @@ static int __init processor_probe(struct parisc_device *dev) * boot time (ie shutdown a CPU from an OS perspective). 
*/ /* get the cpu number */ - status = pdc_pat_cpu_get_number(&cpu_info, dev->hpa); + status = pdc_pat_cpu_get_number(&cpu_info, dev->hpa.start); BUG_ON(PDC_OK != status); @@ -130,7 +130,7 @@ static int __init processor_probe(struct parisc_device *dev) printk(KERN_WARNING "IGNORING CPU at 0x%x," " cpu_slot_id > NR_CPUS" " (%ld > %d)\n", - dev->hpa, cpu_info.cpu_num, NR_CPUS); + dev->hpa.start, cpu_info.cpu_num, NR_CPUS); /* Ignore CPU since it will only crash */ boot_cpu_data.cpu_count--; return 1; @@ -149,7 +149,7 @@ static int __init processor_probe(struct parisc_device *dev) p->loops_per_jiffy = loops_per_jiffy; p->dev = dev; /* Save IODC data in case we need it */ - p->hpa = dev->hpa; /* save CPU hpa */ + p->hpa = dev->hpa.start; /* save CPU hpa */ p->cpuid = cpuid; /* save CPU id */ p->txn_addr = txn_addr; /* save CPU IRQ address */ #ifdef CONFIG_SMP diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c index f3428e5e86fb..27160e8bf15b 100644 --- a/arch/parisc/kernel/ptrace.c +++ b/arch/parisc/kernel/ptrace.c @@ -78,52 +78,13 @@ void ptrace_disable(struct task_struct *child) pa_psw(child)->l = 0; } -long sys_ptrace(long request, pid_t pid, long addr, long data) +long arch_ptrace(struct task_struct *child, long request, long addr, long data) { - struct task_struct *child; long ret; #ifdef DEBUG_PTRACE long oaddr=addr, odata=data; #endif - lock_kernel(); - ret = -EPERM; - if (request == PTRACE_TRACEME) { - /* are we already being traced? */ - if (current->ptrace & PT_PTRACED) - goto out; - - ret = security_ptrace(current->parent, current); - if (ret) - goto out; - - /* set the ptrace bit in the process flags. */ - current->ptrace |= PT_PTRACED; - ret = 0; - goto out; - } - - ret = -ESRCH; - read_lock(&tasklist_lock); - child = find_task_by_pid(pid); - if (child) - get_task_struct(child); - read_unlock(&tasklist_lock); - if (!child) - goto out; - ret = -EPERM; - if (pid == 1) /* no messing around with init! */ - goto out_tsk; - - if (request == PTRACE_ATTACH) { - ret = ptrace_attach(child); - goto out_tsk; - } - - ret = ptrace_check_attach(child, request == PTRACE_KILL); - if (ret < 0) - goto out_tsk; - switch (request) { case PTRACE_PEEKTEXT: /* read word at location addr. */ case PTRACE_PEEKDATA: { @@ -303,6 +264,7 @@ long sys_ptrace(long request, pid_t pid, long addr, long data) * sigkill. perhaps it should be put in the status * that it wants to exit. 
*/ + ret = 0; DBG("sys_ptrace(KILL)\n"); if (child->exit_state == EXIT_ZOMBIE) /* already dead */ goto out_tsk; @@ -396,10 +358,7 @@ out_wake: wake_up_process(child); ret = 0; out_tsk: - put_task_struct(child); -out: - unlock_kernel(); - DBG("sys_ptrace(%ld, %d, %lx, %lx) returning %ld\n", + DBG("arch_ptrace(%ld, %d, %lx, %lx) returning %ld\n", request, pid, oaddr, odata, ret); return ret; } diff --git a/arch/parisc/kernel/real2.S b/arch/parisc/kernel/real2.S index 8dd5defb7316..8c2859cca77e 100644 --- a/arch/parisc/kernel/real2.S +++ b/arch/parisc/kernel/real2.S @@ -7,8 +7,10 @@ * Copyright (C) 2000 Hewlett Packard (Paul Bame bame@puffin.external.hp.com) * */ -#include <asm/assembly.h> +#include <linux/config.h> + #include <asm/psw.h> +#include <asm/assembly.h> .section .bss .export real_stack @@ -20,7 +22,7 @@ real32_stack: real64_stack: .block 8192 -#ifdef __LP64__ +#ifdef CONFIG_64BIT # define REG_SZ 8 #else # define REG_SZ 4 @@ -50,7 +52,7 @@ save_cr_end: real32_call_asm: STREG %rp, -RP_OFFSET(%sp) /* save RP */ -#ifdef __LP64__ +#ifdef CONFIG_64BIT callee_save ldo 2*REG_SZ(%sp), %sp /* room for a couple more saves */ STREG %r27, -1*REG_SZ(%sp) @@ -77,7 +79,7 @@ real32_call_asm: b,l save_control_regs,%r2 /* modifies r1, r2, r28 */ nop -#ifdef __LP64__ +#ifdef CONFIG_64BIT rsm PSW_SM_W, %r0 /* go narrow */ #endif @@ -85,7 +87,7 @@ real32_call_asm: bv 0(%r31) nop ric_ret: -#ifdef __LP64__ +#ifdef CONFIG_64BIT ssm PSW_SM_W, %r0 /* go wide */ #endif /* restore CRs before going virtual in case we page fault */ @@ -97,7 +99,7 @@ ric_ret: tovirt_r1 %sp LDREG -REG_SZ(%sp), %sp /* restore SP */ -#ifdef __LP64__ +#ifdef CONFIG_64BIT LDREG -1*REG_SZ(%sp), %r27 LDREG -2*REG_SZ(%sp), %r29 ldo -2*REG_SZ(%sp), %sp @@ -143,24 +145,21 @@ restore_control_regs: /* rfi_virt2real() and rfi_real2virt() could perhaps be adapted for * more general-purpose use by the several places which need RFIs */ - .align 128 .text + .align 128 rfi_virt2real: /* switch to real mode... */ - ssm 0,0 /* See "relied upon translation" */ - nop /* PA 2.0 Arch. F-5 */ - nop - nop + rsm PSW_SM_I,%r0 + load32 PA(rfi_v2r_1), %r1 nop nop nop nop nop - rsm (PSW_SM_Q|PSW_SM_I),%r0 /* disable Q & I bits to load iia queue */ + rsm PSW_SM_Q,%r0 /* disable Q & I bits to load iia queue */ mtctl %r0, %cr17 /* Clear IIASQ tail */ mtctl %r0, %cr17 /* Clear IIASQ head */ - load32 PA(rfi_v2r_1), %r1 mtctl %r1, %cr18 /* IIAOQ head */ ldo 4(%r1), %r1 mtctl %r1, %cr18 /* IIAOQ tail */ @@ -184,10 +183,8 @@ rfi_v2r_1: .text .align 128 rfi_real2virt: - ssm 0,0 /* See "relied upon translation" */ - nop /* PA 2.0 Arch. F-5 */ - nop - nop + rsm PSW_SM_I,%r0 + load32 (rfi_r2v_1), %r1 nop nop nop @@ -197,7 +194,6 @@ rfi_real2virt: rsm PSW_SM_Q,%r0 /* disable Q bit to load iia queue */ mtctl %r0, %cr17 /* Clear IIASQ tail */ mtctl %r0, %cr17 /* Clear IIASQ head */ - load32 (rfi_r2v_1), %r1 mtctl %r1, %cr18 /* IIAOQ head */ ldo 4(%r1), %r1 mtctl %r1, %cr18 /* IIAOQ tail */ @@ -218,7 +214,7 @@ rfi_r2v_1: bv 0(%r2) nop -#ifdef __LP64__ +#ifdef CONFIG_64BIT /************************ 64-bit real-mode calls ***********************/ /* This is only usable in wide kernels right now and will probably stay so */ @@ -296,7 +292,7 @@ pc_in_user_space: ** comparing function pointers. 
*/ __canonicalize_funcptr_for_compare: -#ifdef __LP64__ +#ifdef CONFIG_64BIT bve (%r2) #else bv %r0(%r2) diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c index 0224651fd8f1..3a25a7bd673e 100644 --- a/arch/parisc/kernel/signal.c +++ b/arch/parisc/kernel/signal.c @@ -296,7 +296,6 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, struct rt_sigframe __user *frame; unsigned long rp, usp; unsigned long haddr, sigframe_size; - struct siginfo si; int err = 0; #ifdef __LP64__ compat_int_t compat_val; @@ -490,15 +489,7 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, give_sigsegv: DBG(1,"setup_rt_frame: sending SIGSEGV\n"); - if (sig == SIGSEGV) - ka->sa.sa_handler = SIG_DFL; - si.si_signo = SIGSEGV; - si.si_errno = 0; - si.si_code = SI_KERNEL; - si.si_pid = current->pid; - si.si_uid = current->uid; - si.si_addr = frame; - force_sig_info(SIGSEGV, &si, current); + force_sigsegv(sig, current); return 0; } @@ -633,10 +624,14 @@ do_signal(sigset_t *oldset, struct pt_regs *regs, int in_syscall) put_user(0xe0008200, &usp[3]); put_user(0x34140000, &usp[4]); - /* Stack is 64-byte aligned, and we only - * need to flush 1 cache line */ - asm("fdc 0(%%sr3, %0)\n" - "fic 0(%%sr3, %0)\n" + /* Stack is 64-byte aligned, and we only need + * to flush 1 cache line. + * Flushing one cacheline is cheap. + * "sync" on bigger (> 4 way) boxes is not. + */ + asm("fdc %%r0(%%sr3, %0)\n" + "sync\n" + "fic %%r0(%%sr3, %0)\n" "sync\n" : : "r"(regs->gr[30])); diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c index bcc7e83f5142..ce89da0f654d 100644 --- a/arch/parisc/kernel/smp.c +++ b/arch/parisc/kernel/smp.c @@ -18,7 +18,7 @@ */ #undef ENTRY_SYS_CPUS /* syscall support for iCOD-like functionality */ -#include <linux/autoconf.h> +#include <linux/config.h> #include <linux/types.h> #include <linux/spinlock.h> @@ -181,12 +181,19 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs) while (ops) { unsigned long which = ffz(~ops); + ops &= ~(1 << which); + switch (which) { + case IPI_NOP: +#if (kDEBUG>=100) + printk(KERN_DEBUG "CPU%d IPI_NOP\n",this_cpu); +#endif /* kDEBUG */ + break; + case IPI_RESCHEDULE: #if (kDEBUG>=100) printk(KERN_DEBUG "CPU%d IPI_RESCHEDULE\n",this_cpu); #endif /* kDEBUG */ - ops &= ~(1 << IPI_RESCHEDULE); /* * Reschedule callback. Everything to be * done is done by the interrupt return path. 
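
[Editor's aside, not part of the patch] The ipi_interrupt() rework in the smp.c hunk above clears each pending bit once, up front ("ops &= ~(1 << which)"), so every case — including the new IPI_NOP — is handled uniformly instead of each case clearing its own bit. The following stand-alone user-space sketch models that extract-lowest-bit-and-dispatch pattern; the enum values and the dispatch() helper are illustrative assumptions, not the kernel's code.

/*
 * Minimal model of the reworked IPI dispatch loop: find the lowest
 * pending bit, clear it once before the switch, then act on it.
 * ffz(~ops) in the kernel is equivalent to counting trailing zeros
 * of ops, which __builtin_ctzl() gives us here (ops is nonzero
 * inside the loop).
 */
#include <stdio.h>

enum { IPI_NOP, IPI_RESCHEDULE, IPI_CALL_FUNC, IPI_CPU_START,
       IPI_CPU_STOP, IPI_CPU_TEST };

static void dispatch(unsigned long ops)
{
	while (ops) {
		unsigned long which = __builtin_ctzl(ops); /* lowest set bit */

		ops &= ~(1UL << which);	/* clear it once, before the switch */

		switch (which) {
		case IPI_NOP:		/* nothing to do */
			break;
		case IPI_RESCHEDULE:	/* work happens on the interrupt return path */
			break;
		default:
			printf("unknown IPI %lu\n", which);
		}
	}
}

int main(void)
{
	dispatch((1UL << IPI_NOP) | (1UL << IPI_RESCHEDULE));
	return 0;
}

The point of the refactoring is that an unknown IPI number can no longer leave its bit set and spin the loop forever: the bit is gone before the switch runs.
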
@@ -197,7 +204,6 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs) #if (kDEBUG>=100) printk(KERN_DEBUG "CPU%d IPI_CALL_FUNC\n",this_cpu); #endif /* kDEBUG */ - ops &= ~(1 << IPI_CALL_FUNC); { volatile struct smp_call_struct *data; void (*func)(void *info); @@ -231,7 +237,6 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs) #if (kDEBUG>=100) printk(KERN_DEBUG "CPU%d IPI_CPU_START\n",this_cpu); #endif /* kDEBUG */ - ops &= ~(1 << IPI_CPU_START); #ifdef ENTRY_SYS_CPUS p->state = STATE_RUNNING; #endif @@ -241,7 +246,6 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs) #if (kDEBUG>=100) printk(KERN_DEBUG "CPU%d IPI_CPU_STOP\n",this_cpu); #endif /* kDEBUG */ - ops &= ~(1 << IPI_CPU_STOP); #ifdef ENTRY_SYS_CPUS #else halt_processor(); @@ -252,13 +256,11 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs) #if (kDEBUG>=100) printk(KERN_DEBUG "CPU%d is alive!\n",this_cpu); #endif /* kDEBUG */ - ops &= ~(1 << IPI_CPU_TEST); break; default: printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n", this_cpu, which); - ops &= ~(1 << which); return IRQ_NONE; } /* Switch */ } /* while (ops) */ @@ -312,6 +314,12 @@ smp_send_start(void) { send_IPI_allbutself(IPI_CPU_START); } void smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); } +void +smp_send_all_nop(void) +{ + send_IPI_allbutself(IPI_NOP); +} + /** * Run a function on all other CPUs. @@ -338,6 +346,10 @@ smp_call_function (void (*func) (void *info), void *info, int retry, int wait) /* Can deadlock when called with interrupts disabled */ WARN_ON(irqs_disabled()); + + /* can also deadlock if IPIs are disabled */ + WARN_ON((get_eiem() & (1UL<<(CPU_IRQ_MAX - IPI_IRQ))) == 0); + data.func = func; data.info = info; @@ -463,6 +475,7 @@ void __init smp_callin(void) #endif smp_cpu_init(slave_id); + preempt_disable(); #if 0 /* NOT WORKING YET - see entry.S */ istack = (void *)__get_free_pages(GFP_KERNEL,ISTACK_ORDER); diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index 8c7a7185cd3b..d66163492890 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S @@ -6,6 +6,7 @@ * thanks to Philipp Rumpf, Mike Shaver and various others * sorry about the wall, puffin.. */ +#include <linux/config.h> /* for CONFIG_SMP */ #include <asm/asm-offsets.h> #include <asm/unistd.h> @@ -22,15 +23,13 @@ */ #define KILL_INSN break 0,0 -#include <linux/config.h> /* for CONFIG_SMP */ - -#ifdef __LP64__ +#ifdef CONFIG_64BIT .level 2.0w #else .level 1.1 #endif -#ifndef __LP64__ +#ifndef CONFIG_64BIT .macro fixup_branch,lbl b \lbl .endm @@ -103,7 +102,7 @@ linux_gateway_entry: mfsp %sr7,%r1 /* save user sr7 */ mtsp %r1,%sr3 /* and store it in sr3 */ -#ifdef __LP64__ +#ifdef CONFIG_64BIT /* for now we can *always* set the W bit on entry to the syscall * since we don't support wide userland processes. We could * also save the current SM other than in r0 and restore it on @@ -155,7 +154,7 @@ linux_gateway_entry: STREG %r19, TASK_PT_GR19(%r1) LDREGM -FRAME_SIZE(%r30), %r2 /* get users sp back */ -#ifdef __LP64__ +#ifdef CONFIG_64BIT extrd,u %r2,63,1,%r19 /* W hidden in bottom bit */ #if 0 xor %r19,%r2,%r2 /* clear bottom bit */ @@ -165,7 +164,7 @@ linux_gateway_entry: #endif STREG %r2, TASK_PT_GR30(%r1) /* ... 
and save it */ - STREG %r20, TASK_PT_GR20(%r1) + STREG %r20, TASK_PT_GR20(%r1) /* Syscall number */ STREG %r21, TASK_PT_GR21(%r1) STREG %r22, TASK_PT_GR22(%r1) STREG %r23, TASK_PT_GR23(%r1) /* 4th argument */ @@ -186,7 +185,7 @@ linux_gateway_entry: loadgp -#ifdef __LP64__ +#ifdef CONFIG_64BIT ldo -16(%r30),%r29 /* Reference param save area */ copy %r19,%r2 /* W bit back to r2 */ #else @@ -205,7 +204,7 @@ linux_gateway_entry: /* Note! We cannot use the syscall table that is mapped nearby since the gateway page is mapped execute-only. */ -#ifdef __LP64__ +#ifdef CONFIG_64BIT ldil L%sys_call_table, %r1 or,= %r2,%r2,%r2 addil L%(sys_call_table64-sys_call_table), %r1 @@ -321,7 +320,7 @@ tracesys_next: LDREG TASK_PT_GR25(%r1), %r25 LDREG TASK_PT_GR24(%r1), %r24 LDREG TASK_PT_GR23(%r1), %r23 -#ifdef __LP64__ +#ifdef CONFIG_64BIT LDREG TASK_PT_GR22(%r1), %r22 LDREG TASK_PT_GR21(%r1), %r21 ldo -16(%r30),%r29 /* Reference param save area */ @@ -350,7 +349,7 @@ tracesys_next: tracesys_exit: ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ LDREG TI_TASK(%r1), %r1 -#ifdef __LP64__ +#ifdef CONFIG_64BIT ldo -16(%r30),%r29 /* Reference param save area */ #endif bl syscall_trace, %r2 @@ -371,7 +370,7 @@ tracesys_exit: tracesys_sigexit: ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ LDREG 0(%r1), %r1 -#ifdef __LP64__ +#ifdef CONFIG_64BIT ldo -16(%r30),%r29 /* Reference param save area */ #endif bl syscall_trace, %r2 @@ -404,7 +403,7 @@ lws_start: gate .+8, %r0 depi 3, 31, 2, %r31 /* Ensure we return to userspace */ -#ifdef __LP64__ +#ifdef CONFIG_64BIT /* FIXME: If we are a 64-bit kernel just * turn this on unconditionally. */ @@ -440,7 +439,7 @@ lws_exit_nosys: /* Fall through: Return to userspace */ lws_exit: -#ifdef __LP64__ +#ifdef CONFIG_64BIT /* decide whether to reset the wide mode bit * * For a syscall, the W bit is stored in the lowest bit @@ -486,7 +485,7 @@ lws_exit: /* ELF64 Process entry path */ lws_compare_and_swap64: -#ifdef __LP64__ +#ifdef CONFIG_64BIT b,n lws_compare_and_swap #else /* If we are not a 64-bit kernel, then we don't @@ -497,7 +496,7 @@ lws_compare_and_swap64: /* ELF32 Process entry path */ lws_compare_and_swap32: -#ifdef __LP64__ +#ifdef CONFIG_64BIT /* Clip all the input registers */ depdi 0, 31, 32, %r26 depdi 0, 31, 32, %r25 @@ -528,6 +527,7 @@ lws_compare_and_swap: We *must* giveup this call and fail. */ ldw 4(%sr2,%r20), %r28 /* Load thread register */ + /* WARNING: If cr27 cycles to the same value we have problems */ mfctl %cr27, %r21 /* Get current thread register */ cmpb,<>,n %r21, %r28, cas_lock /* Called recursive? */ b lws_exit /* Return error! */ @@ -608,7 +608,7 @@ cas_action: the other for the store. Either return -EFAULT. Each of the entries must be relocated. 
*/ .section __ex_table,"aw" -#ifdef __LP64__ +#ifdef CONFIG_64BIT /* Pad the address calculation */ .word 0,(2b - linux_gateway_page) .word 0,(3b - linux_gateway_page) @@ -619,7 +619,7 @@ cas_action: .previous .section __ex_table,"aw" -#ifdef __LP64__ +#ifdef CONFIG_64BIT /* Pad the address calculation */ .word 0,(1b - linux_gateway_page) .word 0,(3b - linux_gateway_page) @@ -638,7 +638,7 @@ end_linux_gateway_page: /* Relocate symbols assuming linux_gateway_page is mapped to virtual address 0x0 */ -#ifdef __LP64__ +#ifdef CONFIG_64BIT /* FIXME: The code will always be on the gateay page and thus it will be on the first 4k, the assembler seems to think that the final @@ -666,7 +666,7 @@ lws_table: sys_call_table: #include "syscall_table.S" -#ifdef __LP64__ +#ifdef CONFIG_64BIT .align 4096 .export sys_call_table64 .Lsys_call_table64: diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index dcfa4d3d0e7d..32cbc0489324 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S @@ -35,7 +35,7 @@ #undef ENTRY_UHOH #undef ENTRY_COMP #undef ENTRY_OURS -#if defined(__LP64__) && !defined(SYSCALL_TABLE_64BIT) +#if defined(CONFIG_64BIT) && !defined(SYSCALL_TABLE_64BIT) /* Use ENTRY_SAME for 32-bit syscalls which are the same on wide and * narrow palinux. Use ENTRY_DIFF for those where a 32-bit specific * implementation is required on wide palinux. Use ENTRY_COMP where @@ -46,7 +46,7 @@ #define ENTRY_UHOH(_name_) .dword sys32_##unimplemented #define ENTRY_OURS(_name_) .dword parisc_##_name_ #define ENTRY_COMP(_name_) .dword compat_sys_##_name_ -#elif defined(__LP64__) && defined(SYSCALL_TABLE_64BIT) +#elif defined(CONFIG_64BIT) && defined(SYSCALL_TABLE_64BIT) #define ENTRY_SAME(_name_) .dword sys_##_name_ #define ENTRY_DIFF(_name_) .dword sys_##_name_ #define ENTRY_UHOH(_name_) .dword sys_##_name_ @@ -368,5 +368,11 @@ ENTRY_COMP(mbind) /* 260 */ ENTRY_COMP(get_mempolicy) ENTRY_COMP(set_mempolicy) + ENTRY_SAME(ni_syscall) /* 263: reserved for vserver */ + ENTRY_SAME(add_key) + ENTRY_SAME(request_key) /* 265 */ + ENTRY_SAME(keyctl) + ENTRY_SAME(ioprio_set) + ENTRY_SAME(ioprio_get) /* Nothing yet */ diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c index 7ff67f8e9f8c..cded25680787 100644 --- a/arch/parisc/kernel/time.c +++ b/arch/parisc/kernel/time.c @@ -33,10 +33,6 @@ #include <linux/timex.h> -u64 jiffies_64 = INITIAL_JIFFIES; - -EXPORT_SYMBOL(jiffies_64); - /* xtime and wall_jiffies keep wall-clock time */ extern unsigned long wall_jiffies; @@ -89,14 +85,6 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) } } -#ifdef CONFIG_CHASSIS_LCD_LED - /* Only schedule the led tasklet on cpu 0, and only if it - * is enabled. 
- */ - if (cpu == 0 && !atomic_read(&led_tasklet.count)) - tasklet_schedule(&led_tasklet); -#endif - /* check soft power switch status */ if (cpu == 0 && !atomic_read(&power_tasklet.count)) tasklet_schedule(&power_tasklet); @@ -104,6 +92,24 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) return IRQ_HANDLED; } + +unsigned long profile_pc(struct pt_regs *regs) +{ + unsigned long pc = instruction_pointer(regs); + + if (regs->gr[0] & PSW_N) + pc -= 4; + +#ifdef CONFIG_SMP + if (in_lock_functions(pc)) + pc = regs->gr[2]; +#endif + + return pc; +} +EXPORT_SYMBOL(profile_pc); + + /*** converted from ia64 ***/ /* * Return the number of micro-seconds that elapsed since the last diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index d2e5b229a2f4..15914f0235a0 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c @@ -74,7 +74,10 @@ void show_regs(struct pt_regs *regs) char *level; unsigned long cr30; unsigned long cr31; - + /* carlos says that gcc understands better memory in a struct, + * and it makes our life easier with fpregs -- T-Bone */ + struct { u32 sw[2]; } s; + level = user_mode(regs) ? KERN_DEBUG : KERN_CRIT; printk("%s\n", level); /* don't want to have that pretty register dump messed up */ @@ -103,11 +106,33 @@ void show_regs(struct pt_regs *regs) printk("%s\n", buf); } -#if RIDICULOUSLY_VERBOSE - for (i = 0; i < 32; i += 2) - printk("%sFR%02d : %016lx FR%2d : %016lx", level, i, - regs->fr[i], i+1, regs->fr[i+1]); -#endif + /* FR are 64bit everywhere. Need to use asm to get the content + * of fpsr/fper1, and we assume that we won't have a FP Identify + * in our way, otherwise we're screwed. + * The fldd is used to restore the T-bit if there was one, as the + * store clears it anyway. + * BTW, PA2.0 book says "thou shall not use fstw on FPSR/FPERs". */ + __asm__ ( + "fstd %%fr0,0(%1) \n\t" + "fldd 0(%1),%%fr0 \n\t" + : "=m" (s) : "r" (&s) : "%r0" + ); + + printk("%s\n", level); + printk("%s VZOUICununcqcqcqcqcqcrmunTDVZOUI\n", level); + printbinary(buf, s.sw[0], 32); + printk("%sFPSR: %s\n", level, buf); + printk("%sFPER1: %08x\n", level, s.sw[1]); + + /* here we'll print fr0 again, tho it'll be meaningless */ + for (i = 0; i < 32; i += 4) { + int j; + p = buf; + p += sprintf(p, "%sfr%02d-%02d ", level, i, i + 3); + for (j = 0; j < 4; j++) + p += sprintf(p, " %016llx", (i+j) == 0 ? 0 : regs->fr[i+j]); + printk("%s\n", buf); + } cr30 = mfctl(30); cr31 = mfctl(31); diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c index 62eea35bcd69..eaae8a021f9f 100644 --- a/arch/parisc/kernel/unaligned.c +++ b/arch/parisc/kernel/unaligned.c @@ -513,15 +513,18 @@ void handle_unaligned(struct pt_regs *regs) register int flop=0; /* true if this is a flop */ /* log a message with pacing */ - if (user_mode(regs)) - { - if (unaligned_count > 5 && jiffies - last_time > 5*HZ) - { + if (user_mode(regs)) { + if (current->thread.flags & PARISC_UAC_SIGBUS) { + goto force_sigbus; + } + + if (unaligned_count > 5 && jiffies - last_time > 5*HZ) { unaligned_count = 0; last_time = jiffies; } - if (++unaligned_count < 5) - { + + if (!(current->thread.flags & PARISC_UAC_NOPRINT) + && ++unaligned_count < 5) { char buf[256]; sprintf(buf, "%s(%d): unaligned access to 0x" RFMT " at ip=0x" RFMT "\n", current->comm, current->pid, regs->ior, regs->iaoq[0]); @@ -530,6 +533,7 @@ void handle_unaligned(struct pt_regs *regs) show_regs(regs); #endif } + if (!unaligned_enabled) goto force_sigbus; } |
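
[Editor's aside, not part of the patch] The unaligned.c hunk above gates its warnings behind the new PARISC_UAC_SIGBUS/PARISC_UAC_NOPRINT thread flags and paces them with the unaligned_count/last_time pair: at most a few messages are printed, and the counter is only re-armed after a quiet period of 5*HZ jiffies. Below is a minimal stand-alone sketch of that pacing scheme; jiffies and HZ are simulated, and report_unaligned() is a hypothetical helper, so only the counter logic mirrors the patch.

/*
 * Sketch of the message pacing used by handle_unaligned():
 * print at most a handful of warnings, and reset the counter only
 * once no warning has been printed for 5*HZ ticks.
 */
#include <stdio.h>

#define HZ 100UL

static unsigned long jiffies;		/* simulated tick counter */
static unsigned long last_time;
static unsigned long unaligned_count;

static void report_unaligned(unsigned long addr)
{
	if (unaligned_count > 5 && jiffies - last_time > 5 * HZ) {
		unaligned_count = 0;	/* quiet period elapsed: re-arm */
		last_time = jiffies;
	}

	if (++unaligned_count < 5)
		printf("unaligned access to 0x%lx (tick %lu)\n", addr, jiffies);
}

int main(void)
{
	for (jiffies = 0; jiffies < 20 * HZ; jiffies++)
		if (jiffies % 32 == 0)	/* pretend a fault fires now and then */
			report_unaligned(0xdeadbeefUL + jiffies);
	return 0;
}

Run stand-alone, this prints four messages, goes silent while faults keep arriving, and starts printing again once a 5*HZ gap has passed — the same burst-then-throttle behaviour the patched handler aims for.
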