Diffstat (limited to 'arch/tile')
-rw-r--r--  arch/tile/include/asm/atomic_32.h   |  2
-rw-r--r--  arch/tile/include/asm/futex.h       | 40
-rw-r--r--  arch/tile/include/asm/spinlock_32.h |  2
-rw-r--r--  arch/tile/include/asm/spinlock_64.h |  2
-rw-r--r--  arch/tile/kernel/pci.c              | 39
-rw-r--r--  arch/tile/kernel/pci_gx.c           | 35
-rw-r--r--  arch/tile/kernel/setup.c            |  2
-rw-r--r--  arch/tile/lib/spinlock_32.c         | 23
-rw-r--r--  arch/tile/lib/spinlock_64.c         | 22
9 files changed, 40 insertions, 127 deletions
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index a93774255136..53a423e7cb92 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -101,6 +101,8 @@ static inline void atomic_set(atomic_t *v, int n)
 	_atomic_xchg(&v->counter, n);
 }
 
+#define atomic_set_release(v, i)	atomic_set((v), (i))
+
 /* A 64bit atomic type */
 
 typedef struct {
diff --git a/arch/tile/include/asm/futex.h b/arch/tile/include/asm/futex.h
index e64a1b75fc38..83c1e639b411 100644
--- a/arch/tile/include/asm/futex.h
+++ b/arch/tile/include/asm/futex.h
@@ -106,12 +106,9 @@
 	lock = __atomic_hashed_lock((int __force *)uaddr)
 #endif
 
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval,
+		u32 __user *uaddr)
 {
-	int op = (encoded_op >> 28) & 7;
-	int cmp = (encoded_op >> 24) & 15;
-	int oparg = (encoded_op << 8) >> 20;
-	int cmparg = (encoded_op << 20) >> 20;
 	int uninitialized_var(val), ret;
 
 	__futex_prolog();
@@ -119,12 +116,6 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 	/* The 32-bit futex code makes this assumption, so validate it here. */
 	BUILD_BUG_ON(sizeof(atomic_t) != sizeof(int));
 
-	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
-		oparg = 1 << oparg;
-
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
-		return -EFAULT;
-
 	pagefault_disable();
 	switch (op) {
 	case FUTEX_OP_SET:
@@ -148,30 +139,9 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 	}
 	pagefault_enable();
 
-	if (!ret) {
-		switch (cmp) {
-		case FUTEX_OP_CMP_EQ:
-			ret = (val == cmparg);
-			break;
-		case FUTEX_OP_CMP_NE:
-			ret = (val != cmparg);
-			break;
-		case FUTEX_OP_CMP_LT:
-			ret = (val < cmparg);
-			break;
-		case FUTEX_OP_CMP_GE:
-			ret = (val >= cmparg);
-			break;
-		case FUTEX_OP_CMP_LE:
-			ret = (val <= cmparg);
-			break;
-		case FUTEX_OP_CMP_GT:
-			ret = (val > cmparg);
-			break;
-		default:
-			ret = -ENOSYS;
-		}
-	}
+	if (!ret)
+		*oval = val;
+
 	return ret;
 }
 
diff --git a/arch/tile/include/asm/spinlock_32.h b/arch/tile/include/asm/spinlock_32.h
index b14b1ba5bf9c..cba8ba9b8da6 100644
--- a/arch/tile/include/asm/spinlock_32.h
+++ b/arch/tile/include/asm/spinlock_32.h
@@ -64,8 +64,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 	lock->current_ticket = old_ticket + TICKET_QUANTUM;
 }
 
-void arch_spin_unlock_wait(arch_spinlock_t *lock);
-
 /*
  * Read-write spinlocks, allowing multiple readers
  * but only one writer.
diff --git a/arch/tile/include/asm/spinlock_64.h b/arch/tile/include/asm/spinlock_64.h
index b9718fb4e74a..9a2c2d605752 100644
--- a/arch/tile/include/asm/spinlock_64.h
+++ b/arch/tile/include/asm/spinlock_64.h
@@ -58,8 +58,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 	__insn_fetchadd4(&lock->lock, 1U << __ARCH_SPIN_CURRENT_SHIFT);
 }
 
-void arch_spin_unlock_wait(arch_spinlock_t *lock);
-
 void arch_spin_lock_slow(arch_spinlock_t *lock, u32 val);
 
 /* Grab the "next" ticket number and bump it atomically.
diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c
index bc6656b5708b..bbf81579b1f8 100644
--- a/arch/tile/kernel/pci.c
+++ b/arch/tile/kernel/pci.c
@@ -67,16 +67,6 @@ static struct pci_ops tile_cfg_ops;
 
 
 /*
- * We don't need to worry about the alignment of resources.
- */
-resource_size_t pcibios_align_resource(void *data, const struct resource *res,
-			    resource_size_t size, resource_size_t align)
-{
-	return res->start;
-}
-EXPORT_SYMBOL(pcibios_align_resource);
-
-/*
  * Open a FD to the hypervisor PCI device.
  *
  * controller_id is the controller number, config type is 0 or 1 for
@@ -274,6 +264,7 @@ static void fixup_read_and_payload_sizes(void)
  */
 int __init pcibios_init(void)
 {
+	struct pci_host_bridge *bridge;
 	int i;
 
 	pr_info("PCI: Probing PCI hardware\n");
@@ -306,16 +297,26 @@ int __init pcibios_init(void)
 
 			pci_add_resource(&resources, &ioport_resource);
 			pci_add_resource(&resources, &iomem_resource);
-			bus = pci_scan_root_bus(NULL, 0, controller->ops,
-						controller, &resources);
+
+			bridge = pci_alloc_host_bridge(0);
+			if (!bridge)
+				break;
+
+			list_splice_init(&resources, &bridge->windows);
+			bridge->dev.parent = NULL;
+			bridge->sysdata = controller;
+			bridge->busnr = 0;
+			bridge->ops = controller->ops;
+			bridge->swizzle_irq = pci_common_swizzle;
+			bridge->map_irq = tile_map_irq;
+
+			pci_scan_root_bus_bridge(bridge);
+			bus = bridge->bus;
 			controller->root_bus = bus;
 			controller->last_busno = bus->busn_res.end;
 		}
 	}
 
-	/* Do machine dependent PCI interrupt routing */
-	pci_fixup_irqs(pci_common_swizzle, tile_map_irq);
-
 	/*
 	 * This comes from the generic Linux PCI driver.
 	 *
@@ -369,14 +370,6 @@ int __init pcibios_init(void)
 }
 subsys_initcall(pcibios_init);
 
-/*
- * No bus fixups needed.
- */
-void pcibios_fixup_bus(struct pci_bus *bus)
-{
-	/* Nothing needs to be done. */
-}
-
 void pcibios_set_master(struct pci_dev *dev)
 {
 	/* No special bus mastering setup handling. */
diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c
index b554a68eea1b..9aa238ac7b35 100644
--- a/arch/tile/kernel/pci_gx.c
+++ b/arch/tile/kernel/pci_gx.c
@@ -108,15 +108,6 @@ static struct pci_ops tile_cfg_ops;
 /* Mask of CPUs that should receive PCIe interrupts. */
 static struct cpumask intr_cpus_map;
 
-/* We don't need to worry about the alignment of resources. */
-resource_size_t pcibios_align_resource(void *data, const struct resource *res,
-				       resource_size_t size,
-				       resource_size_t align)
-{
-	return res->start;
-}
-EXPORT_SYMBOL(pcibios_align_resource);
-
 /*
  * Pick a CPU to receive and handle the PCIe interrupts, based on the IRQ #.
  * For now, we simply send interrupts to non-dataplane CPUs.
@@ -669,6 +660,7 @@ int __init pcibios_init(void)
 	resource_size_t offset;
 	LIST_HEAD(resources);
 	int next_busno;
+	struct pci_host_bridge *bridge;
 	int i;
 
 	tile_pci_init();
@@ -881,15 +873,25 @@
 					  controller->mem_offset);
 		pci_add_resource(&resources, &controller->io_space);
 		controller->first_busno = next_busno;
-		bus = pci_scan_root_bus(NULL, next_busno, controller->ops,
-					controller, &resources);
+
+		bridge = pci_alloc_host_bridge(0);
+		if (!bridge)
+			break;
+
+		list_splice_init(&resources, &bridge->windows);
+		bridge->dev.parent = NULL;
+		bridge->sysdata = controller;
+		bridge->busnr = next_busno;
+		bridge->ops = controller->ops;
+		bridge->swizzle_irq = pci_common_swizzle;
+		bridge->map_irq = tile_map_irq;
+
+		pci_scan_root_bus_bridge(bridge);
+		bus = bridge->bus;
 		controller->root_bus = bus;
 		next_busno = bus->busn_res.end + 1;
 	}
 
-	/* Do machine dependent PCI interrupt routing */
-	pci_fixup_irqs(pci_common_swizzle, tile_map_irq);
-
 	/*
 	 * This comes from the generic Linux PCI driver.
 	 *
@@ -1038,11 +1040,6 @@ alloc_mem_map_failed:
 }
 subsys_initcall(pcibios_init);
 
-/* No bus fixups needed. */
-void pcibios_fixup_bus(struct pci_bus *bus)
-{
-}
-
 /* Process any "pci=" kernel boot arguments. */
 char *__init pcibios_setup(char *str)
 {
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 443a70bccc1c..6becb96c60a0 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -1200,7 +1200,7 @@ static void __init validate_hv(void)
 	 * We use a struct cpumask for this, so it must be big enough.
 	 */
 	if ((smp_height * smp_width) > nr_cpu_ids)
-		early_panic("Hypervisor %d x %d grid too big for Linux NR_CPUS %d\n",
+		early_panic("Hypervisor %d x %d grid too big for Linux NR_CPUS %u\n",
 			    smp_height, smp_width, nr_cpu_ids);
 #endif
 
diff --git a/arch/tile/lib/spinlock_32.c b/arch/tile/lib/spinlock_32.c
index 076c6cc43113..db9333f2447c 100644
--- a/arch/tile/lib/spinlock_32.c
+++ b/arch/tile/lib/spinlock_32.c
@@ -62,29 +62,6 @@ int arch_spin_trylock(arch_spinlock_t *lock)
 }
 EXPORT_SYMBOL(arch_spin_trylock);
 
-void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-	u32 iterations = 0;
-	int curr = READ_ONCE(lock->current_ticket);
-	int next = READ_ONCE(lock->next_ticket);
-
-	/* Return immediately if unlocked. */
-	if (next == curr)
-		return;
-
-	/* Wait until the current locker has released the lock. */
-	do {
-		delay_backoff(iterations++);
-	} while (READ_ONCE(lock->current_ticket) == curr);
-
-	/*
-	 * The TILE architecture doesn't do read speculation; therefore
-	 * a control dependency guarantees a LOAD->{LOAD,STORE} order.
-	 */
-	barrier();
-}
-EXPORT_SYMBOL(arch_spin_unlock_wait);
-
 /*
  * The low byte is always reserved to be the marker for a "tns" operation
  * since the low bit is set to "1" by a tns.  The next seven bits are
diff --git a/arch/tile/lib/spinlock_64.c b/arch/tile/lib/spinlock_64.c
index a4b5b2cbce93..de414c22892f 100644
--- a/arch/tile/lib/spinlock_64.c
+++ b/arch/tile/lib/spinlock_64.c
@@ -62,28 +62,6 @@ int arch_spin_trylock(arch_spinlock_t *lock)
 }
 EXPORT_SYMBOL(arch_spin_trylock);
 
-void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-	u32 iterations = 0;
-	u32 val = READ_ONCE(lock->lock);
-	u32 curr = arch_spin_current(val);
-
-	/* Return immediately if unlocked. */
-	if (arch_spin_next(val) == curr)
-		return;
-
-	/* Wait until the current locker has released the lock. */
-	do {
-		delay_backoff(iterations++);
-	} while (arch_spin_current(READ_ONCE(lock->lock)) == curr);
-
-	/*
-	 * The TILE architecture doesn't do read speculation; therefore
-	 * a control dependency guarantees a LOAD->{LOAD,STORE} order.
-	 */
-	barrier();
-}
-EXPORT_SYMBOL(arch_spin_unlock_wait);
 
 /*
  * If the read lock fails due to a writer, we retry periodically
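
Note on the futex.h change above: the arch helper now only performs the atomic read-modify-write and reports the old value through *oval; decoding of encoded_op and the cmp/cmparg comparison are handled by the generic futex code outside arch/tile, which is not shown here because the diffstat is limited to arch/tile. The sketch below is illustrative only, not the actual kernel/futex.c implementation; the function name futex_op_and_compare() is hypothetical, and the decoding shifts are taken from the lines removed from arch/tile/include/asm/futex.h.

/*
 * Illustrative sketch of the caller-side contract after this change:
 * the arch-specific helper does the atomic op and returns the old
 * value, while the (generic) caller decodes encoded_op, checks user
 * access, and evaluates the comparison.
 */
static int futex_op_and_compare(unsigned int encoded_op, u32 __user *uaddr)
{
	int op     = (encoded_op >> 28) & 7;	/* FUTEX_OP_* */
	int cmp    = (encoded_op >> 24) & 15;	/* FUTEX_OP_CMP_* */
	int oparg  = (encoded_op << 8) >> 20;	/* 12-bit signed operand */
	int cmparg = (encoded_op << 20) >> 20;	/* 12-bit signed compare arg */
	int oldval, ret;

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		oparg = 1 << oparg;

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
		return -EFAULT;

	/* Arch hook: atomic op on *uaddr, previous value via &oldval. */
	ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
	if (ret)
		return ret;

	switch (cmp) {
	case FUTEX_OP_CMP_EQ: return oldval == cmparg;
	case FUTEX_OP_CMP_NE: return oldval != cmparg;
	case FUTEX_OP_CMP_LT: return oldval <  cmparg;
	case FUTEX_OP_CMP_GE: return oldval >= cmparg;
	case FUTEX_OP_CMP_LE: return oldval <= cmparg;
	case FUTEX_OP_CMP_GT: return oldval >  cmparg;
	default:              return -ENOSYS;
	}
}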