Diffstat (limited to 'arch/tile')
33 files changed, 227 insertions, 284 deletions
diff --git a/arch/tile/configs/tilegx_defconfig b/arch/tile/configs/tilegx_defconfig index 91de7dd7427f..37dc9364c4a1 100644 --- a/arch/tile/configs/tilegx_defconfig +++ b/arch/tile/configs/tilegx_defconfig @@ -218,7 +218,6 @@ CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y -CONFIG_NETPOLL_TRAP=y CONFIG_TUN=y CONFIG_VETH=m CONFIG_NET_DSA_MV88E6060=y diff --git a/arch/tile/configs/tilepro_defconfig b/arch/tile/configs/tilepro_defconfig index c7702b7ab7a5..76a2781dec2c 100644 --- a/arch/tile/configs/tilepro_defconfig +++ b/arch/tile/configs/tilepro_defconfig @@ -337,7 +337,6 @@ CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y -CONFIG_NETPOLL_TRAP=y CONFIG_TUN=y CONFIG_VETH=m CONFIG_NET_DSA_MV88E6060=y diff --git a/arch/tile/gxio/mpipe.c b/arch/tile/gxio/mpipe.c index 320ff5e6e61e..6f00e9850636 100644 --- a/arch/tile/gxio/mpipe.c +++ b/arch/tile/gxio/mpipe.c @@ -463,6 +463,7 @@ int gxio_mpipe_set_timestamp(gxio_mpipe_context_t *context, (uint64_t)ts->tv_nsec, (uint64_t)cycles); } +EXPORT_SYMBOL_GPL(gxio_mpipe_set_timestamp); int gxio_mpipe_get_timestamp(gxio_mpipe_context_t *context, struct timespec *ts) @@ -485,11 +486,13 @@ int gxio_mpipe_get_timestamp(gxio_mpipe_context_t *context, } return ret; } +EXPORT_SYMBOL_GPL(gxio_mpipe_get_timestamp); int gxio_mpipe_adjust_timestamp(gxio_mpipe_context_t *context, int64_t delta) { return gxio_mpipe_adjust_timestamp_aux(context, delta); } +EXPORT_SYMBOL_GPL(gxio_mpipe_adjust_timestamp); /* Get our internal context used for link name access. This context is * special in that it is not associated with an mPIPE service domain. @@ -542,6 +545,7 @@ int gxio_mpipe_link_instance(const char *link_name) return gxio_mpipe_info_instance_aux(context, name); } +EXPORT_SYMBOL_GPL(gxio_mpipe_link_instance); int gxio_mpipe_link_enumerate_mac(int idx, char *link_name, uint8_t *link_mac) { diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild index e6462b8a6284..b4c488b65745 100644 --- a/arch/tile/include/asm/Kbuild +++ b/arch/tile/include/asm/Kbuild @@ -11,7 +11,6 @@ generic-y += errno.h generic-y += exec.h generic-y += fb.h generic-y += fcntl.h -generic-y += hash.h generic-y += hw_irq.h generic-y += ioctl.h generic-y += ioctls.h diff --git a/arch/tile/include/asm/io.h b/arch/tile/include/asm/io.h index 9fe434969fab..6ef4ecab1df2 100644 --- a/arch/tile/include/asm/io.h +++ b/arch/tile/include/asm/io.h @@ -241,6 +241,10 @@ static inline void writeq(u64 val, unsigned long addr) #define readw_relaxed readw #define readl_relaxed readl #define readq_relaxed readq +#define writeb_relaxed writeb +#define writew_relaxed writew +#define writel_relaxed writel +#define writeq_relaxed writeq #define ioread8 readb #define ioread16 readw @@ -392,8 +396,7 @@ extern void ioport_unmap(void __iomem *addr); static inline long ioport_panic(void) { #ifdef __tilegx__ - panic("PCI IO space support is disabled. Configure the kernel with" - " CONFIG_TILE_PCI_IO to enable it"); + panic("PCI IO space support is disabled. 
Configure the kernel with CONFIG_TILE_PCI_IO to enable it"); #else panic("inb/outb and friends do not exist on tile"); #endif @@ -402,7 +405,7 @@ static inline long ioport_panic(void) static inline void __iomem *ioport_map(unsigned long port, unsigned int len) { - pr_info("ioport_map: mapping IO resources is unsupported on tile.\n"); + pr_info("ioport_map: mapping IO resources is unsupported on tile\n"); return NULL; } diff --git a/arch/tile/include/asm/pgtable.h b/arch/tile/include/asm/pgtable.h index 33587f16c152..5d1950788c69 100644 --- a/arch/tile/include/asm/pgtable.h +++ b/arch/tile/include/asm/pgtable.h @@ -235,9 +235,9 @@ static inline void __pte_clear(pte_t *ptep) #define pte_donemigrate(x) hv_pte_set_present(hv_pte_clear_migrating(x)) #define pte_ERROR(e) \ - pr_err("%s:%d: bad pte 0x%016llx.\n", __FILE__, __LINE__, pte_val(e)) + pr_err("%s:%d: bad pte 0x%016llx\n", __FILE__, __LINE__, pte_val(e)) #define pgd_ERROR(e) \ - pr_err("%s:%d: bad pgd 0x%016llx.\n", __FILE__, __LINE__, pgd_val(e)) + pr_err("%s:%d: bad pgd 0x%016llx\n", __FILE__, __LINE__, pgd_val(e)) /* Return PA and protection info for a given kernel VA. */ int va_to_cpa_and_pte(void *va, phys_addr_t *cpa, pte_t *pte); diff --git a/arch/tile/include/asm/pgtable_64.h b/arch/tile/include/asm/pgtable_64.h index 2c8a9cd102d3..e96cec52f6d8 100644 --- a/arch/tile/include/asm/pgtable_64.h +++ b/arch/tile/include/asm/pgtable_64.h @@ -86,7 +86,7 @@ static inline int pud_huge_page(pud_t pud) } #define pmd_ERROR(e) \ - pr_err("%s:%d: bad pmd 0x%016llx.\n", __FILE__, __LINE__, pmd_val(e)) + pr_err("%s:%d: bad pmd 0x%016llx\n", __FILE__, __LINE__, pmd_val(e)) static inline void pud_clear(pud_t *pudp) { diff --git a/arch/tile/include/uapi/asm/ptrace.h b/arch/tile/include/uapi/asm/ptrace.h index 7757e1985fb6..d03b829857e8 100644 --- a/arch/tile/include/uapi/asm/ptrace.h +++ b/arch/tile/include/uapi/asm/ptrace.h @@ -52,12 +52,16 @@ typedef uint_reg_t pt_reg_t; * system call or exception. "struct sigcontext" has the same shape. */ struct pt_regs { - /* Saved main processor registers; 56..63 are special. */ - /* tp, sp, and lr must immediately follow regs[] for aliasing. */ - pt_reg_t regs[53]; - pt_reg_t tp; /* aliases regs[TREG_TP] */ - pt_reg_t sp; /* aliases regs[TREG_SP] */ - pt_reg_t lr; /* aliases regs[TREG_LR] */ + union { + /* Saved main processor registers; 56..63 are special. */ + pt_reg_t regs[56]; + struct { + pt_reg_t __regs[53]; + pt_reg_t tp; /* aliases regs[TREG_TP] */ + pt_reg_t sp; /* aliases regs[TREG_SP] */ + pt_reg_t lr; /* aliases regs[TREG_LR] */ + }; + }; /* Saved special registers. */ pt_reg_t pc; /* stored in EX_CONTEXT_K_0 */ diff --git a/arch/tile/include/uapi/asm/sigcontext.h b/arch/tile/include/uapi/asm/sigcontext.h index 6348e59d3724..39ff5d1a232d 100644 --- a/arch/tile/include/uapi/asm/sigcontext.h +++ b/arch/tile/include/uapi/asm/sigcontext.h @@ -24,10 +24,16 @@ * but is simplified since we know the fault is from userspace. */ struct sigcontext { - __uint_reg_t gregs[53]; /* General-purpose registers. */ - __uint_reg_t tp; /* Aliases gregs[TREG_TP]. */ - __uint_reg_t sp; /* Aliases gregs[TREG_SP]. */ - __uint_reg_t lr; /* Aliases gregs[TREG_LR]. */ + __extension__ union { + /* General-purpose registers. */ + __uint_reg_t gregs[56]; + __extension__ struct { + __uint_reg_t __gregs[53]; + __uint_reg_t tp; /* Aliases gregs[TREG_TP]. */ + __uint_reg_t sp; /* Aliases gregs[TREG_SP]. */ + __uint_reg_t lr; /* Aliases gregs[TREG_LR]. */ + }; + }; __uint_reg_t pc; /* Program counter. 
*/ __uint_reg_t ics; /* In Interrupt Critical Section? */ __uint_reg_t faultnum; /* Fault number. */ diff --git a/arch/tile/kernel/early_printk.c b/arch/tile/kernel/early_printk.c index b608e00e7f6d..aefb2c086726 100644 --- a/arch/tile/kernel/early_printk.c +++ b/arch/tile/kernel/early_printk.c @@ -43,13 +43,20 @@ static struct console early_hv_console = { void early_panic(const char *fmt, ...) { - va_list ap; + struct va_format vaf; + va_list args; + arch_local_irq_disable_all(); - va_start(ap, fmt); - early_printk("Kernel panic - not syncing: "); - early_vprintk(fmt, ap); - early_printk("\n"); - va_end(ap); + + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; + + early_printk("Kernel panic - not syncing: %pV", &vaf); + + va_end(args); + dump_stack(); hv_halt(); } diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c index aca6000bca75..c4646bb99342 100644 --- a/arch/tile/kernel/hardwall.c +++ b/arch/tile/kernel/hardwall.c @@ -365,8 +365,7 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num) * to quiesce. */ if (rect->teardown_in_progress) { - pr_notice("cpu %d: detected %s hardwall violation %#lx" - " while teardown already in progress\n", + pr_notice("cpu %d: detected %s hardwall violation %#lx while teardown already in progress\n", cpu, hwt->name, (long)mfspr_XDN(hwt, DIRECTION_PROTECT)); goto done; @@ -630,8 +629,7 @@ static void _hardwall_deactivate(struct hardwall_type *hwt, struct thread_struct *ts = &task->thread; if (cpumask_weight(&task->cpus_allowed) != 1) { - pr_err("pid %d (%s) releasing %s hardwall with" - " an affinity mask containing %d cpus!\n", + pr_err("pid %d (%s) releasing %s hardwall with an affinity mask containing %d cpus!\n", task->pid, task->comm, hwt->name, cpumask_weight(&task->cpus_allowed)); BUG(); diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c index ba85765e1436..22044fc691ef 100644 --- a/arch/tile/kernel/irq.c +++ b/arch/tile/kernel/irq.c @@ -107,9 +107,8 @@ void tile_dev_intr(struct pt_regs *regs, int intnum) { long sp = stack_pointer - (long) current_thread_info(); if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) { - pr_emerg("tile_dev_intr: " - "stack overflow: %ld\n", - sp - sizeof(struct thread_info)); + pr_emerg("%s: stack overflow: %ld\n", + __func__, sp - sizeof(struct thread_info)); dump_stack(); } } diff --git a/arch/tile/kernel/kgdb.c b/arch/tile/kernel/kgdb.c index 4cd88381a83e..ff5335ae050d 100644 --- a/arch/tile/kernel/kgdb.c +++ b/arch/tile/kernel/kgdb.c @@ -125,9 +125,7 @@ int dbg_set_reg(int regno, void *mem, struct pt_regs *regs) void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task) { - int reg; struct pt_regs *thread_regs; - unsigned long *ptr = gdb_regs; if (task == NULL) return; @@ -136,9 +134,7 @@ sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task) memset(gdb_regs, 0, NUMREGBYTES); thread_regs = task_pt_regs(task); - for (reg = 0; reg <= TREG_LAST_GPR; reg++) - *(ptr++) = thread_regs->regs[reg]; - + memcpy(gdb_regs, thread_regs, TREG_LAST_GPR * sizeof(unsigned long)); gdb_regs[TILEGX_PC_REGNUM] = thread_regs->pc; gdb_regs[TILEGX_FAULTNUM_REGNUM] = thread_regs->faultnum; } diff --git a/arch/tile/kernel/kprobes.c b/arch/tile/kernel/kprobes.c index 27cdcacbe81d..f8a45c51e9e4 100644 --- a/arch/tile/kernel/kprobes.c +++ b/arch/tile/kernel/kprobes.c @@ -90,8 +90,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) return -EINVAL; if (insn_has_control(*p->addr)) { - pr_notice("Kprobes for control instructions 
are not " - "supported\n"); + pr_notice("Kprobes for control instructions are not supported\n"); return -EINVAL; } diff --git a/arch/tile/kernel/machine_kexec.c b/arch/tile/kernel/machine_kexec.c index f0b54a934712..008aa2faef55 100644 --- a/arch/tile/kernel/machine_kexec.c +++ b/arch/tile/kernel/machine_kexec.c @@ -77,16 +77,13 @@ void machine_crash_shutdown(struct pt_regs *regs) int machine_kexec_prepare(struct kimage *image) { if (num_online_cpus() > 1) { - pr_warning("%s: detected attempt to kexec " - "with num_online_cpus() > 1\n", - __func__); + pr_warn("%s: detected attempt to kexec with num_online_cpus() > 1\n", + __func__); return -ENOSYS; } if (image->type != KEXEC_TYPE_DEFAULT) { - pr_warning("%s: detected attempt to kexec " - "with unsupported type: %d\n", - __func__, - image->type); + pr_warn("%s: detected attempt to kexec with unsupported type: %d\n", + __func__, image->type); return -ENOSYS; } return 0; @@ -131,8 +128,8 @@ static unsigned char *kexec_bn2cl(void *pg) */ csum = ip_compute_csum(pg, bhdrp->b_size); if (csum != 0) { - pr_warning("%s: bad checksum %#x (size %d)\n", - __func__, csum, bhdrp->b_size); + pr_warn("%s: bad checksum %#x (size %d)\n", + __func__, csum, bhdrp->b_size); return 0; } @@ -160,8 +157,7 @@ static unsigned char *kexec_bn2cl(void *pg) while (*desc != '\0') { desc++; if (((unsigned long)desc & PAGE_MASK) != (unsigned long)pg) { - pr_info("%s: ran off end of page\n", - __func__); + pr_info("%s: ran off end of page\n", __func__); return 0; } } @@ -195,20 +191,18 @@ static void kexec_find_and_set_command_line(struct kimage *image) } if (command_line != 0) { - pr_info("setting new command line to \"%s\"\n", - command_line); + pr_info("setting new command line to \"%s\"\n", command_line); hverr = hv_set_command_line( (HV_VirtAddr) command_line, strlen(command_line)); kunmap_atomic(command_line); } else { - pr_info("%s: no command line found; making empty\n", - __func__); + pr_info("%s: no command line found; making empty\n", __func__); hverr = hv_set_command_line((HV_VirtAddr) command_line, 0); } if (hverr) - pr_warning("%s: hv_set_command_line returned error: %d\n", - __func__, hverr); + pr_warn("%s: hv_set_command_line returned error: %d\n", + __func__, hverr); } /* diff --git a/arch/tile/kernel/messaging.c b/arch/tile/kernel/messaging.c index ac950be1318e..7475af3aacec 100644 --- a/arch/tile/kernel/messaging.c +++ b/arch/tile/kernel/messaging.c @@ -59,9 +59,8 @@ void hv_message_intr(struct pt_regs *regs, int intnum) { long sp = stack_pointer - (long) current_thread_info(); if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) { - pr_emerg("hv_message_intr: " - "stack overflow: %ld\n", - sp - sizeof(struct thread_info)); + pr_emerg("%s: stack overflow: %ld\n", + __func__, sp - sizeof(struct thread_info)); dump_stack(); } } diff --git a/arch/tile/kernel/module.c b/arch/tile/kernel/module.c index d19b13e3a59f..96447c9160a0 100644 --- a/arch/tile/kernel/module.c +++ b/arch/tile/kernel/module.c @@ -96,8 +96,8 @@ void module_free(struct module *mod, void *module_region) static int validate_hw2_last(long value, struct module *me) { if (((value << 16) >> 16) != value) { - pr_warning("module %s: Out of range HW2_LAST value %#lx\n", - me->name, value); + pr_warn("module %s: Out of range HW2_LAST value %#lx\n", + me->name, value); return 0; } return 1; @@ -210,10 +210,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs, value -= (unsigned long) location; /* pc-relative */ value = (long) value >> 3; /* count by instrs */ if (!validate_jumpoff(value)) { - 
pr_warning("module %s: Out of range jump to" - " %#llx at %#llx (%p)\n", me->name, - sym->st_value + rel[i].r_addend, - rel[i].r_offset, location); + pr_warn("module %s: Out of range jump to %#llx at %#llx (%p)\n", + me->name, + sym->st_value + rel[i].r_addend, + rel[i].r_offset, location); return -ENOEXEC; } MUNGE(create_JumpOff_X1); diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c index 1f80a88c75a6..f70c7892fa25 100644 --- a/arch/tile/kernel/pci.c +++ b/arch/tile/kernel/pci.c @@ -178,8 +178,8 @@ int __init tile_pci_init(void) continue; hv_cfg_fd1 = tile_pcie_open(i, 1); if (hv_cfg_fd1 < 0) { - pr_err("PCI: Couldn't open config fd to HV " - "for controller %d\n", i); + pr_err("PCI: Couldn't open config fd to HV for controller %d\n", + i); goto err_cont; } @@ -423,8 +423,7 @@ int pcibios_enable_device(struct pci_dev *dev, int mask) for (i = 0; i < 6; i++) { r = &dev->resource[i]; if (r->flags & IORESOURCE_UNSET) { - pr_err("PCI: Device %s not available " - "because of resource collisions\n", + pr_err("PCI: Device %s not available because of resource collisions\n", pci_name(dev)); return -EINVAL; } diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c index e39f9c542807..2c95f37ebbed 100644 --- a/arch/tile/kernel/pci_gx.c +++ b/arch/tile/kernel/pci_gx.c @@ -131,8 +131,7 @@ static int tile_irq_cpu(int irq) count = cpumask_weight(&intr_cpus_map); if (unlikely(count == 0)) { - pr_warning("intr_cpus_map empty, interrupts will be" - " delievered to dataplane tiles\n"); + pr_warn("intr_cpus_map empty, interrupts will be delievered to dataplane tiles\n"); return irq % (smp_height * smp_width); } @@ -197,16 +196,16 @@ static int tile_pcie_open(int trio_index) /* Get the properties of the PCIe ports on this TRIO instance. */ ret = gxio_trio_get_port_property(context, &pcie_ports[trio_index]); if (ret < 0) { - pr_err("PCI: PCIE_GET_PORT_PROPERTY failure, error %d," - " on TRIO %d\n", ret, trio_index); + pr_err("PCI: PCIE_GET_PORT_PROPERTY failure, error %d, on TRIO %d\n", + ret, trio_index); goto get_port_property_failure; } context->mmio_base_mac = iorpc_ioremap(context->fd, 0, HV_TRIO_CONFIG_IOREMAP_SIZE); if (context->mmio_base_mac == NULL) { - pr_err("PCI: TRIO config space mapping failure, error %d," - " on TRIO %d\n", ret, trio_index); + pr_err("PCI: TRIO config space mapping failure, error %d, on TRIO %d\n", + ret, trio_index); ret = -ENOMEM; goto trio_mmio_mapping_failure; @@ -622,9 +621,8 @@ static void fixup_read_and_payload_sizes(struct pci_controller *controller) dev_control.max_read_req_sz, mac); if (err < 0) { - pr_err("PCI: PCIE_CONFIGURE_MAC_MPS_MRS failure, " - "MAC %d on TRIO %d\n", - mac, controller->trio_index); + pr_err("PCI: PCIE_CONFIGURE_MAC_MPS_MRS failure, MAC %d on TRIO %d\n", + mac, controller->trio_index); } } @@ -720,27 +718,24 @@ int __init pcibios_init(void) reg_offset); if (!port_status.dl_up) { if (rc_delay[trio_index][mac]) { - pr_info("Delaying PCIe RC TRIO init %d sec" - " on MAC %d on TRIO %d\n", + pr_info("Delaying PCIe RC TRIO init %d sec on MAC %d on TRIO %d\n", rc_delay[trio_index][mac], mac, trio_index); msleep(rc_delay[trio_index][mac] * 1000); } ret = gxio_trio_force_rc_link_up(trio_context, mac); if (ret < 0) - pr_err("PCI: PCIE_FORCE_LINK_UP failure, " - "MAC %d on TRIO %d\n", mac, trio_index); + pr_err("PCI: PCIE_FORCE_LINK_UP failure, MAC %d on TRIO %d\n", + mac, trio_index); } - pr_info("PCI: Found PCI controller #%d on TRIO %d MAC %d\n", i, - trio_index, controller->mac); + pr_info("PCI: Found PCI controller #%d on TRIO %d 
MAC %d\n", + i, trio_index, controller->mac); /* Delay the bus probe if needed. */ if (rc_delay[trio_index][mac]) { - pr_info("Delaying PCIe RC bus enumerating %d sec" - " on MAC %d on TRIO %d\n", - rc_delay[trio_index][mac], mac, - trio_index); + pr_info("Delaying PCIe RC bus enumerating %d sec on MAC %d on TRIO %d\n", + rc_delay[trio_index][mac], mac, trio_index); msleep(rc_delay[trio_index][mac] * 1000); } else { /* @@ -758,11 +753,10 @@ int __init pcibios_init(void) if (pcie_ports[trio_index].ports[mac].removable) { pr_info("PCI: link is down, MAC %d on TRIO %d\n", mac, trio_index); - pr_info("This is expected if no PCIe card" - " is connected to this link\n"); + pr_info("This is expected if no PCIe card is connected to this link\n"); } else pr_err("PCI: link is down, MAC %d on TRIO %d\n", - mac, trio_index); + mac, trio_index); continue; } @@ -829,8 +823,8 @@ int __init pcibios_init(void) /* Alloc a PIO region for PCI config access per MAC. */ ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0); if (ret < 0) { - pr_err("PCI: PCI CFG PIO alloc failure for mac %d " - "on TRIO %d, give up\n", mac, trio_index); + pr_err("PCI: PCI CFG PIO alloc failure for mac %d on TRIO %d, give up\n", + mac, trio_index); continue; } @@ -842,8 +836,8 @@ int __init pcibios_init(void) trio_context->pio_cfg_index[mac], mac, 0, HV_TRIO_PIO_FLAG_CONFIG_SPACE); if (ret < 0) { - pr_err("PCI: PCI CFG PIO init failure for mac %d " - "on TRIO %d, give up\n", mac, trio_index); + pr_err("PCI: PCI CFG PIO init failure for mac %d on TRIO %d, give up\n", + mac, trio_index); continue; } @@ -865,7 +859,7 @@ int __init pcibios_init(void) (TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT - 1))); if (trio_context->mmio_base_pio_cfg[mac] == NULL) { pr_err("PCI: PIO map failure for mac %d on TRIO %d\n", - mac, trio_index); + mac, trio_index); continue; } @@ -925,9 +919,8 @@ int __init pcibios_init(void) /* Alloc a PIO region for PCI memory access for each RC port. 
*/ ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0); if (ret < 0) { - pr_err("PCI: MEM PIO alloc failure on TRIO %d mac %d, " - "give up\n", controller->trio_index, - controller->mac); + pr_err("PCI: MEM PIO alloc failure on TRIO %d mac %d, give up\n", + controller->trio_index, controller->mac); continue; } @@ -944,9 +937,8 @@ int __init pcibios_init(void) 0, 0); if (ret < 0) { - pr_err("PCI: MEM PIO init failure on TRIO %d mac %d, " - "give up\n", controller->trio_index, - controller->mac); + pr_err("PCI: MEM PIO init failure on TRIO %d mac %d, give up\n", + controller->trio_index, controller->mac); continue; } @@ -957,9 +949,8 @@ int __init pcibios_init(void) */ ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0); if (ret < 0) { - pr_err("PCI: I/O PIO alloc failure on TRIO %d mac %d, " - "give up\n", controller->trio_index, - controller->mac); + pr_err("PCI: I/O PIO alloc failure on TRIO %d mac %d, give up\n", + controller->trio_index, controller->mac); continue; } @@ -976,9 +967,8 @@ int __init pcibios_init(void) 0, HV_TRIO_PIO_FLAG_IO_SPACE); if (ret < 0) { - pr_err("PCI: I/O PIO init failure on TRIO %d mac %d, " - "give up\n", controller->trio_index, - controller->mac); + pr_err("PCI: I/O PIO init failure on TRIO %d mac %d, give up\n", + controller->trio_index, controller->mac); continue; } @@ -997,10 +987,9 @@ int __init pcibios_init(void) ret = gxio_trio_alloc_memory_maps(trio_context, 1, 0, 0); if (ret < 0) { - pr_err("PCI: Mem-Map alloc failure on TRIO %d " - "mac %d for MC %d, give up\n", - controller->trio_index, - controller->mac, j); + pr_err("PCI: Mem-Map alloc failure on TRIO %d mac %d for MC %d, give up\n", + controller->trio_index, controller->mac, + j); goto alloc_mem_map_failed; } @@ -1030,10 +1019,9 @@ int __init pcibios_init(void) j, GXIO_TRIO_ORDER_MODE_UNORDERED); if (ret < 0) { - pr_err("PCI: Mem-Map init failure on TRIO %d " - "mac %d for MC %d, give up\n", - controller->trio_index, - controller->mac, j); + pr_err("PCI: Mem-Map init failure on TRIO %d mac %d for MC %d, give up\n", + controller->trio_index, controller->mac, + j); goto alloc_mem_map_failed; } @@ -1453,7 +1441,7 @@ static struct pci_ops tile_cfg_ops = { static unsigned int tilegx_msi_startup(struct irq_data *d) { if (d->msi_desc) - unmask_msi_irq(d); + pci_msi_unmask_irq(d); return 0; } @@ -1465,14 +1453,14 @@ static void tilegx_msi_ack(struct irq_data *d) static void tilegx_msi_mask(struct irq_data *d) { - mask_msi_irq(d); + pci_msi_mask_irq(d); __insn_mtspr(SPR_IPI_MASK_SET_K, 1UL << d->irq); } static void tilegx_msi_unmask(struct irq_data *d) { __insn_mtspr(SPR_IPI_MASK_RESET_K, 1UL << d->irq); - unmask_msi_irq(d); + pci_msi_unmask_irq(d); } static struct irq_chip tilegx_msi_chip = { @@ -1510,9 +1498,7 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) * Most PCIe endpoint devices do support 64-bit message addressing. */ if (desc->msi_attrib.is_64 == 0) { - dev_printk(KERN_INFO, &pdev->dev, - "64-bit MSI message address not supported, " - "falling back to legacy interrupts.\n"); + dev_info(&pdev->dev, "64-bit MSI message address not supported, falling back to legacy interrupts\n"); ret = -ENOMEM; goto is_64_failure; @@ -1549,11 +1535,8 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) /* SQ regions are out, allocate from map mem regions. */ mem_map = gxio_trio_alloc_memory_maps(trio_context, 1, 0, 0); if (mem_map < 0) { - dev_printk(KERN_INFO, &pdev->dev, - "%s Mem-Map alloc failure. " - "Failed to initialize MSI interrupts. 
" - "Falling back to legacy interrupts.\n", - desc->msi_attrib.is_msix ? "MSI-X" : "MSI"); + dev_info(&pdev->dev, "%s Mem-Map alloc failure - failed to initialize MSI interrupts - falling back to legacy interrupts\n", + desc->msi_attrib.is_msix ? "MSI-X" : "MSI"); ret = -ENOMEM; goto msi_mem_map_alloc_failure; } @@ -1580,7 +1563,7 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) mem_map, mem_map_base, mem_map_limit, trio_context->asid); if (ret < 0) { - dev_printk(KERN_INFO, &pdev->dev, "HV MSI config failed.\n"); + dev_info(&pdev->dev, "HV MSI config failed\n"); goto hv_msi_config_failure; } @@ -1590,7 +1573,7 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) msg.address_hi = msi_addr >> 32; msg.address_lo = msi_addr & 0xffffffff; - write_msi_msg(irq, &msg); + pci_write_msi_msg(irq, &msg); irq_set_chip_and_handler(irq, &tilegx_msi_chip, handle_level_irq); irq_set_handler_data(irq, controller); diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c index 0050cbc1d9de..48e5773dd0b7 100644 --- a/arch/tile/kernel/process.c +++ b/arch/tile/kernel/process.c @@ -52,7 +52,7 @@ static int __init idle_setup(char *str) return -EINVAL; if (!strcmp(str, "poll")) { - pr_info("using polling idle threads.\n"); + pr_info("using polling idle threads\n"); cpu_idle_poll_ctrl(true); return 0; } else if (!strcmp(str, "halt")) { @@ -547,27 +547,25 @@ void show_regs(struct pt_regs *regs) struct task_struct *tsk = validate_current(); int i; - pr_err("\n"); if (tsk != &corrupt_current) show_regs_print_info(KERN_ERR); #ifdef __tilegx__ for (i = 0; i < 17; i++) - pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT" r%-2d: "REGFMT"\n", + pr_err(" r%-2d: " REGFMT " r%-2d: " REGFMT " r%-2d: " REGFMT "\n", i, regs->regs[i], i+18, regs->regs[i+18], i+36, regs->regs[i+36]); - pr_err(" r17: "REGFMT" r35: "REGFMT" tp : "REGFMT"\n", + pr_err(" r17: " REGFMT " r35: " REGFMT " tp : " REGFMT "\n", regs->regs[17], regs->regs[35], regs->tp); - pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr); + pr_err(" sp : " REGFMT " lr : " REGFMT "\n", regs->sp, regs->lr); #else for (i = 0; i < 13; i++) - pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT - " r%-2d: "REGFMT" r%-2d: "REGFMT"\n", + pr_err(" r%-2d: " REGFMT " r%-2d: " REGFMT " r%-2d: " REGFMT " r%-2d: " REGFMT "\n", i, regs->regs[i], i+14, regs->regs[i+14], i+27, regs->regs[i+27], i+40, regs->regs[i+40]); - pr_err(" r13: "REGFMT" tp : "REGFMT" sp : "REGFMT" lr : "REGFMT"\n", + pr_err(" r13: " REGFMT " tp : " REGFMT " sp : " REGFMT " lr : " REGFMT "\n", regs->regs[13], regs->tp, regs->sp, regs->lr); #endif - pr_err(" pc : "REGFMT" ex1: %ld faultnum: %ld\n", + pr_err(" pc : " REGFMT " ex1: %ld faultnum: %ld\n", regs->pc, regs->ex1, regs->faultnum); dump_stack_regs(regs); diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c index b9736ded06f2..864eea69556d 100644 --- a/arch/tile/kernel/setup.c +++ b/arch/tile/kernel/setup.c @@ -130,7 +130,7 @@ static int __init setup_maxmem(char *str) maxmem_pfn = (maxmem >> HPAGE_SHIFT) << (HPAGE_SHIFT - PAGE_SHIFT); pr_info("Forcing RAM used to no more than %dMB\n", - maxmem_pfn >> (20 - PAGE_SHIFT)); + maxmem_pfn >> (20 - PAGE_SHIFT)); return 0; } early_param("maxmem", setup_maxmem); @@ -149,7 +149,7 @@ static int __init setup_maxnodemem(char *str) maxnodemem_pfn[node] = (maxnodemem >> HPAGE_SHIFT) << (HPAGE_SHIFT - PAGE_SHIFT); pr_info("Forcing RAM used on node %ld to no more than %dMB\n", - node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT)); + node, maxnodemem_pfn[node] >> (20 - 
PAGE_SHIFT)); return 0; } early_param("maxnodemem", setup_maxnodemem); @@ -417,8 +417,7 @@ static void __init setup_memory(void) range.start = (start_pa + HPAGE_SIZE - 1) & HPAGE_MASK; range.size -= (range.start - start_pa); range.size &= HPAGE_MASK; - pr_err("Range not hugepage-aligned: %#llx..%#llx:" - " now %#llx-%#llx\n", + pr_err("Range not hugepage-aligned: %#llx..%#llx: now %#llx-%#llx\n", start_pa, start_pa + orig_size, range.start, range.start + range.size); } @@ -437,8 +436,8 @@ static void __init setup_memory(void) if (PFN_DOWN(range.size) > maxnodemem_pfn[i]) { int max_size = maxnodemem_pfn[i]; if (max_size > 0) { - pr_err("Maxnodemem reduced node %d to" - " %d pages\n", i, max_size); + pr_err("Maxnodemem reduced node %d to %d pages\n", + i, max_size); range.size = PFN_PHYS(max_size); } else { pr_err("Maxnodemem disabled node %d\n", i); @@ -490,8 +489,8 @@ static void __init setup_memory(void) NR_CPUS * (PFN_UP(per_cpu_size) >> PAGE_SHIFT); if (end < pci_reserve_end_pfn + percpu_pages) { end = pci_reserve_start_pfn; - pr_err("PCI mapping region reduced node %d to" - " %ld pages\n", i, end - start); + pr_err("PCI mapping region reduced node %d to %ld pages\n", + i, end - start); } } #endif @@ -534,11 +533,10 @@ static void __init setup_memory(void) } } physpages -= dropped_pages; - pr_warning("Only using %ldMB memory;" - " ignoring %ldMB.\n", - physpages >> (20 - PAGE_SHIFT), - dropped_pages >> (20 - PAGE_SHIFT)); - pr_warning("Consider using a larger page size.\n"); + pr_warn("Only using %ldMB memory - ignoring %ldMB\n", + physpages >> (20 - PAGE_SHIFT), + dropped_pages >> (20 - PAGE_SHIFT)); + pr_warn("Consider using a larger page size\n"); } #endif @@ -556,25 +554,23 @@ static void __init setup_memory(void) MAXMEM_PFN : mappable_physpages; highmem_pages = (long) (physpages - lowmem_pages); - pr_notice("%ldMB HIGHMEM available.\n", - pages_to_mb(highmem_pages > 0 ? highmem_pages : 0)); - pr_notice("%ldMB LOWMEM available.\n", - pages_to_mb(lowmem_pages)); + pr_notice("%ldMB HIGHMEM available\n", + pages_to_mb(highmem_pages > 0 ? highmem_pages : 0)); + pr_notice("%ldMB LOWMEM available\n", pages_to_mb(lowmem_pages)); #else /* Set max_low_pfn based on what node 0 can directly address. */ max_low_pfn = node_end_pfn[0]; #ifndef __tilegx__ if (node_end_pfn[0] > MAXMEM_PFN) { - pr_warning("Only using %ldMB LOWMEM.\n", - MAXMEM>>20); - pr_warning("Use a HIGHMEM enabled kernel.\n"); + pr_warn("Only using %ldMB LOWMEM\n", MAXMEM >> 20); + pr_warn("Use a HIGHMEM enabled kernel\n"); max_low_pfn = MAXMEM_PFN; max_pfn = MAXMEM_PFN; node_end_pfn[0] = MAXMEM_PFN; } else { - pr_notice("%ldMB memory available.\n", - pages_to_mb(node_end_pfn[0])); + pr_notice("%ldMB memory available\n", + pages_to_mb(node_end_pfn[0])); } for (i = 1; i < MAX_NUMNODES; ++i) { node_start_pfn[i] = 0; @@ -589,8 +585,7 @@ static void __init setup_memory(void) if (pages) high_memory = pfn_to_kaddr(node_end_pfn[i]); } - pr_notice("%ldMB memory available.\n", - pages_to_mb(lowmem_pages)); + pr_notice("%ldMB memory available\n", pages_to_mb(lowmem_pages)); #endif #endif } @@ -1112,8 +1107,8 @@ static void __init load_hv_initrd(void) fd = hv_fs_findfile((HV_VirtAddr) initramfs_file); if (fd == HV_ENOENT) { if (set_initramfs_file) { - pr_warning("No such hvfs initramfs file '%s'\n", - initramfs_file); + pr_warn("No such hvfs initramfs file '%s'\n", + initramfs_file); return; } else { /* Try old backwards-compatible name. 
*/ @@ -1126,8 +1121,8 @@ static void __init load_hv_initrd(void) stat = hv_fs_fstat(fd); BUG_ON(stat.size < 0); if (stat.flags & HV_FS_ISDIR) { - pr_warning("Ignoring hvfs file '%s': it's a directory.\n", - initramfs_file); + pr_warn("Ignoring hvfs file '%s': it's a directory\n", + initramfs_file); return; } initrd = alloc_bootmem_pages(stat.size); @@ -1185,9 +1180,8 @@ static void __init validate_hv(void) HV_Topology topology = hv_inquire_topology(); BUG_ON(topology.coord.x != 0 || topology.coord.y != 0); if (topology.width != 1 || topology.height != 1) { - pr_warning("Warning: booting UP kernel on %dx%d grid;" - " will ignore all but first tile.\n", - topology.width, topology.height); + pr_warn("Warning: booting UP kernel on %dx%d grid; will ignore all but first tile\n", + topology.width, topology.height); } #endif @@ -1208,9 +1202,8 @@ static void __init validate_hv(void) * We use a struct cpumask for this, so it must be big enough. */ if ((smp_height * smp_width) > nr_cpu_ids) - early_panic("Hypervisor %d x %d grid too big for Linux" - " NR_CPUS %d\n", smp_height, smp_width, - nr_cpu_ids); + early_panic("Hypervisor %d x %d grid too big for Linux NR_CPUS %d\n", + smp_height, smp_width, nr_cpu_ids); #endif /* @@ -1265,10 +1258,9 @@ static void __init validate_va(void) /* Kernel PCs must have their high bit set; see intvec.S. */ if ((long)VMALLOC_START >= 0) - early_panic( - "Linux VMALLOC region below the 2GB line (%#lx)!\n" - "Reconfigure the kernel with smaller VMALLOC_RESERVE.\n", - VMALLOC_START); + early_panic("Linux VMALLOC region below the 2GB line (%#lx)!\n" + "Reconfigure the kernel with smaller VMALLOC_RESERVE\n", + VMALLOC_START); #endif } @@ -1395,7 +1387,7 @@ static void __init setup_cpu_maps(void) static int __init dataplane(char *str) { - pr_warning("WARNING: dataplane support disabled in this kernel\n"); + pr_warn("WARNING: dataplane support disabled in this kernel\n"); return 0; } @@ -1413,8 +1405,8 @@ void __init setup_arch(char **cmdline_p) len = hv_get_command_line((HV_VirtAddr) boot_command_line, COMMAND_LINE_SIZE); if (boot_command_line[0]) - pr_warning("WARNING: ignoring dynamic command line \"%s\"\n", - boot_command_line); + pr_warn("WARNING: ignoring dynamic command line \"%s\"\n", + boot_command_line); strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); #else char *hv_cmdline; @@ -1540,8 +1532,7 @@ static void __init pcpu_fc_populate_pte(unsigned long addr) BUG_ON(pgd_addr_invalid(addr)); if (addr < VMALLOC_START || addr >= VMALLOC_END) - panic("PCPU addr %#lx outside vmalloc range %#lx..%#lx;" - " try increasing CONFIG_VMALLOC_RESERVE\n", + panic("PCPU addr %#lx outside vmalloc range %#lx..%#lx; try increasing CONFIG_VMALLOC_RESERVE\n", addr, VMALLOC_START, VMALLOC_END); pgd = swapper_pg_dir + pgd_index(addr); @@ -1596,8 +1587,8 @@ void __init setup_per_cpu_areas(void) lowmem_va = (unsigned long)pfn_to_kaddr(pfn); ptep = virt_to_kpte(lowmem_va); if (pte_huge(*ptep)) { - printk(KERN_DEBUG "early shatter of huge page" - " at %#lx\n", lowmem_va); + printk(KERN_DEBUG "early shatter of huge page at %#lx\n", + lowmem_va); shatter_pmd((pmd_t *)ptep); ptep = virt_to_kpte(lowmem_va); BUG_ON(pte_huge(*ptep)); diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c index 7c2fecc52177..bb0a9ce7ae23 100644 --- a/arch/tile/kernel/signal.c +++ b/arch/tile/kernel/signal.c @@ -45,8 +45,7 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) { - int err = 0; - int i; + int err; /* Always make any pending restarted system calls 
return -EINTR */ current_thread_info()->restart_block.fn = do_no_restart_syscall; @@ -57,9 +56,7 @@ int restore_sigcontext(struct pt_regs *regs, */ BUILD_BUG_ON(sizeof(struct sigcontext) != sizeof(struct pt_regs)); BUILD_BUG_ON(sizeof(struct sigcontext) % 8 != 0); - - for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i) - err |= __get_user(regs->regs[i], &sc->gregs[i]); + err = __copy_from_user(regs, sc, sizeof(*regs)); /* Ensure that the PL is always set to USER_PL. */ regs->ex1 = PL_ICS_EX1(USER_PL, EX1_ICS(regs->ex1)); @@ -110,12 +107,7 @@ badframe: int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs) { - int i, err = 0; - - for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i) - err |= __put_user(regs->regs[i], &sc->gregs[i]); - - return err; + return __copy_to_user(sc, regs, sizeof(*regs)); } /* @@ -345,7 +337,6 @@ static void dump_mem(void __user *address) int i, j, k; int found_readable_mem = 0; - pr_err("\n"); if (!access_ok(VERIFY_READ, address, 1)) { pr_err("Not dumping at address 0x%lx (kernel address)\n", (unsigned long)address); @@ -367,7 +358,7 @@ static void dump_mem(void __user *address) (unsigned long)address); found_readable_mem = 1; } - j = sprintf(line, REGFMT":", (unsigned long)addr); + j = sprintf(line, REGFMT ":", (unsigned long)addr); for (k = 0; k < bytes_per_line; ++k) j += sprintf(&line[j], " %02x", buf[k]); pr_err("%s\n", line); @@ -411,8 +402,7 @@ void trace_unhandled_signal(const char *type, struct pt_regs *regs, case SIGFPE: case SIGSEGV: case SIGBUS: - pr_err("User crash: signal %d," - " trap %ld, address 0x%lx\n", + pr_err("User crash: signal %d, trap %ld, address 0x%lx\n", sig, regs->faultnum, address); show_regs(regs); dump_mem((void __user *)address); diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c index 6cb2ce31b5a2..862973074bf9 100644 --- a/arch/tile/kernel/single_step.c +++ b/arch/tile/kernel/single_step.c @@ -222,11 +222,9 @@ static tilepro_bundle_bits rewrite_load_store_unaligned( } if (unaligned_printk || unaligned_fixup_count == 0) { - pr_info("Process %d/%s: PC %#lx: Fixup of" - " unaligned %s at %#lx.\n", + pr_info("Process %d/%s: PC %#lx: Fixup of unaligned %s at %#lx\n", current->pid, current->comm, regs->pc, - (mem_op == MEMOP_LOAD || - mem_op == MEMOP_LOAD_POSTINCR) ? + mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR ? "load" : "store", (unsigned long)addr); if (!unaligned_printk) { diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c index 0d59a1b60c74..20d52a98e171 100644 --- a/arch/tile/kernel/smpboot.c +++ b/arch/tile/kernel/smpboot.c @@ -127,8 +127,7 @@ static __init int reset_init_affinity(void) { long rc = sched_setaffinity(current->pid, &init_affinity); if (rc != 0) - pr_warning("couldn't reset init affinity (%ld)\n", - rc); + pr_warn("couldn't reset init affinity (%ld)\n", rc); return 0; } late_initcall(reset_init_affinity); @@ -174,7 +173,7 @@ static void start_secondary(void) /* Indicate that we're ready to come up. 
*/ /* Must not do this before we're ready to receive messages */ if (cpumask_test_and_set_cpu(cpuid, &cpu_started)) { - pr_warning("CPU#%d already started!\n", cpuid); + pr_warn("CPU#%d already started!\n", cpuid); for (;;) local_irq_enable(); } diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c index c93977a62116..7ff5afdbd3aa 100644 --- a/arch/tile/kernel/stack.c +++ b/arch/tile/kernel/stack.c @@ -387,9 +387,7 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers) * then bust_spinlocks() spit out a space in front of us * and it will mess up our KERN_ERR. */ - pr_err("\n"); - pr_err("Starting stack dump of tid %d, pid %d (%s)" - " on cpu %d at cycle %lld\n", + pr_err("Starting stack dump of tid %d, pid %d (%s) on cpu %d at cycle %lld\n", kbt->task->pid, kbt->task->tgid, kbt->task->comm, raw_smp_processor_id(), get_cycles()); } @@ -411,8 +409,7 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers) i++, address, namebuf, (unsigned long)(kbt->it.sp)); if (i >= 100) { - pr_err("Stack dump truncated" - " (%d frames)\n", i); + pr_err("Stack dump truncated (%d frames)\n", i); break; } } diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c index b854a1cd0079..d412b0856c0a 100644 --- a/arch/tile/kernel/time.c +++ b/arch/tile/kernel/time.c @@ -98,8 +98,8 @@ void __init calibrate_delay(void) { loops_per_jiffy = get_clock_rate() / HZ; pr_info("Clock rate yields %lu.%02lu BogoMIPS (lpj=%lu)\n", - loops_per_jiffy/(500000/HZ), - (loops_per_jiffy/(5000/HZ)) % 100, loops_per_jiffy); + loops_per_jiffy / (500000 / HZ), + (loops_per_jiffy / (5000 / HZ)) % 100, loops_per_jiffy); } /* Called fairly late in init/main.c, but before we go smp. */ diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c index 86900ccd4977..bf841ca517bb 100644 --- a/arch/tile/kernel/traps.c +++ b/arch/tile/kernel/traps.c @@ -46,9 +46,9 @@ static int __init setup_unaligned_fixup(char *str) return 0; pr_info("Fixups for unaligned data accesses are %s\n", - unaligned_fixup >= 0 ? - (unaligned_fixup ? "enabled" : "disabled") : - "completely disabled"); + unaligned_fixup >= 0 ? + (unaligned_fixup ? "enabled" : "disabled") : + "completely disabled"); return 1; } __setup("unaligned_fixup=", setup_unaligned_fixup); @@ -305,8 +305,8 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num, case INT_ILL: if (copy_from_user(&instr, (void __user *)regs->pc, sizeof(instr))) { - pr_err("Unreadable instruction for INT_ILL:" - " %#lx\n", regs->pc); + pr_err("Unreadable instruction for INT_ILL: %#lx\n", + regs->pc); do_exit(SIGKILL); return; } diff --git a/arch/tile/kernel/unaligned.c b/arch/tile/kernel/unaligned.c index c02ea2a45f67..7d9a83be0aca 100644 --- a/arch/tile/kernel/unaligned.c +++ b/arch/tile/kernel/unaligned.c @@ -969,8 +969,7 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle, unaligned_fixup_count++; if (unaligned_printk) { - pr_info("%s/%d. 
Unalign fixup for kernel access " - "to userspace %lx.", + pr_info("%s/%d - Unalign fixup for kernel access to userspace %lx\n", current->comm, current->pid, regs->regs[ra]); } @@ -985,7 +984,7 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle, .si_addr = (unsigned char __user *)0 }; if (unaligned_printk) - pr_info("Unalign bundle: unexp @%llx, %llx", + pr_info("Unalign bundle: unexp @%llx, %llx\n", (unsigned long long)regs->pc, (unsigned long long)bundle); @@ -1370,8 +1369,7 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle, frag.bundle = bundle; if (unaligned_printk) { - pr_info("%s/%d, Unalign fixup: pc=%lx " - "bundle=%lx %d %d %d %d %d %d %d %d.", + pr_info("%s/%d, Unalign fixup: pc=%lx bundle=%lx %d %d %d %d %d %d %d %d\n", current->comm, current->pid, (unsigned long)frag.pc, (unsigned long)frag.bundle, @@ -1380,8 +1378,8 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle, (int)y1_lr, (int)y1_br, (int)x1_add); for (k = 0; k < n; k += 2) - pr_info("[%d] %016llx %016llx", k, - (unsigned long long)frag.insn[k], + pr_info("[%d] %016llx %016llx\n", + k, (unsigned long long)frag.insn[k], (unsigned long long)frag.insn[k+1]); } @@ -1402,7 +1400,7 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle, .si_addr = (void __user *)&jit_code_area[idx] }; - pr_warn("Unalign fixup: pid=%d %s jit_code_area=%llx", + pr_warn("Unalign fixup: pid=%d %s jit_code_area=%llx\n", current->pid, current->comm, (unsigned long long)&jit_code_area[idx]); @@ -1485,7 +1483,7 @@ void do_unaligned(struct pt_regs *regs, int vecnum) /* If exception came from kernel, try fix it up. */ if (fixup_exception(regs)) { if (unaligned_printk) - pr_info("Unalign fixup: %d %llx @%llx", + pr_info("Unalign fixup: %d %llx @%llx\n", (int)unaligned_fixup, (unsigned long long)regs->ex1, (unsigned long long)regs->pc); @@ -1519,7 +1517,7 @@ void do_unaligned(struct pt_regs *regs, int vecnum) }; if (unaligned_printk) - pr_info("Unalign fixup: %d %llx @%llx", + pr_info("Unalign fixup: %d %llx @%llx\n", (int)unaligned_fixup, (unsigned long long)regs->ex1, (unsigned long long)regs->pc); @@ -1579,14 +1577,14 @@ void do_unaligned(struct pt_regs *regs, int vecnum) 0); if (IS_ERR((void __force *)user_page)) { - pr_err("Out of kernel pages trying do_mmap.\n"); + pr_err("Out of kernel pages trying do_mmap\n"); return; } /* Save the address in the thread_info struct */ info->unalign_jit_base = user_page; if (unaligned_printk) - pr_info("Unalign bundle: %d:%d, allocate page @%llx", + pr_info("Unalign bundle: %d:%d, allocate page @%llx\n", raw_smp_processor_id(), current->pid, (unsigned long long)user_page); } diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c index 6c0571216a9d..565e25a98334 100644 --- a/arch/tile/mm/fault.c +++ b/arch/tile/mm/fault.c @@ -169,8 +169,7 @@ static void wait_for_migration(pte_t *pte) while (pte_migrating(*pte)) { barrier(); if (++retries > bound) - panic("Hit migrating PTE (%#llx) and" - " page PFN %#lx still migrating", + panic("Hit migrating PTE (%#llx) and page PFN %#lx still migrating", pte->val, pte_pfn(*pte)); } } @@ -292,11 +291,10 @@ static int handle_page_fault(struct pt_regs *regs, */ stack_offset = stack_pointer & (THREAD_SIZE-1); if (stack_offset < THREAD_SIZE / 8) { - pr_alert("Potential stack overrun: sp %#lx\n", - stack_pointer); + pr_alert("Potential stack overrun: sp %#lx\n", stack_pointer); show_regs(regs); pr_alert("Killing current process %d/%s\n", - tsk->pid, tsk->comm); + tsk->pid, tsk->comm); 
do_group_exit(SIGKILL); } @@ -421,7 +419,7 @@ good_area: } else if (write) { #ifdef TEST_VERIFY_AREA if (!is_page_fault && regs->cs == KERNEL_CS) - pr_err("WP fault at "REGFMT"\n", regs->eip); + pr_err("WP fault at " REGFMT "\n", regs->eip); #endif if (!(vma->vm_flags & VM_WRITE)) goto bad_area; @@ -519,16 +517,15 @@ no_context: pte_t *pte = lookup_address(address); if (pte && pte_present(*pte) && !pte_exec_kernel(*pte)) - pr_crit("kernel tried to execute" - " non-executable page - exploit attempt?" - " (uid: %d)\n", current->uid); + pr_crit("kernel tried to execute non-executable page - exploit attempt? (uid: %d)\n", + current->uid); } #endif if (address < PAGE_SIZE) pr_alert("Unable to handle kernel NULL pointer dereference\n"); else pr_alert("Unable to handle kernel paging request\n"); - pr_alert(" at virtual address "REGFMT", pc "REGFMT"\n", + pr_alert(" at virtual address " REGFMT ", pc " REGFMT "\n", address, regs->pc); show_regs(regs); @@ -575,9 +572,10 @@ do_sigbus: #ifndef __tilegx__ /* We must release ICS before panicking or we won't get anywhere. */ -#define ics_panic(fmt, ...) do { \ - __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0); \ - panic(fmt, __VA_ARGS__); \ +#define ics_panic(fmt, ...) \ +do { \ + __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0); \ + panic(fmt, ##__VA_ARGS__); \ } while (0) /* @@ -615,8 +613,7 @@ struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num, fault_num != INT_DTLB_ACCESS)) { unsigned long old_pc = regs->pc; regs->pc = pc; - ics_panic("Bad ICS page fault args:" - " old PC %#lx, fault %d/%d at %#lx\n", + ics_panic("Bad ICS page fault args: old PC %#lx, fault %d/%d at %#lx", old_pc, fault_num, write, address); } @@ -669,8 +666,8 @@ struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num, #endif fixup = search_exception_tables(pc); if (!fixup) - ics_panic("ICS atomic fault not in table:" - " PC %#lx, fault %d", pc, fault_num); + ics_panic("ICS atomic fault not in table: PC %#lx, fault %d", + pc, fault_num); regs->pc = fixup->fixup; regs->ex1 = PL_ICS_EX1(KERNEL_PL, 0); } @@ -826,8 +823,7 @@ void do_page_fault(struct pt_regs *regs, int fault_num, set_thread_flag(TIF_ASYNC_TLB); if (async->fault_num != 0) { - panic("Second async fault %d;" - " old fault was %d (%#lx/%ld)", + panic("Second async fault %d; old fault was %d (%#lx/%ld)", fault_num, async->fault_num, address, write); } diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c index 33294fdc402e..cd3387370ebb 100644 --- a/arch/tile/mm/homecache.c +++ b/arch/tile/mm/homecache.c @@ -152,12 +152,10 @@ void flush_remote(unsigned long cache_pfn, unsigned long cache_control, cpumask_scnprintf(cache_buf, sizeof(cache_buf), &cache_cpumask_copy); cpumask_scnprintf(tlb_buf, sizeof(tlb_buf), &tlb_cpumask_copy); - pr_err("hv_flush_remote(%#llx, %#lx, %p [%s]," - " %#lx, %#lx, %#lx, %p [%s], %p, %d) = %d\n", + pr_err("hv_flush_remote(%#llx, %#lx, %p [%s], %#lx, %#lx, %#lx, %p [%s], %p, %d) = %d\n", cache_pa, cache_control, cache_cpumask, cache_buf, (unsigned long)tlb_va, tlb_length, tlb_pgsize, - tlb_cpumask, tlb_buf, - asids, asidcount, rc); + tlb_cpumask, tlb_buf, asids, asidcount, rc); panic("Unsafe to continue."); } diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c index e514899e1100..3270e0019266 100644 --- a/arch/tile/mm/hugetlbpage.c +++ b/arch/tile/mm/hugetlbpage.c @@ -284,22 +284,21 @@ static __init int __setup_hugepagesz(unsigned long ps) int level, base_shift; if ((1UL << log_ps) != ps || (log_ps & 1) != 0) { - pr_warn("Not 
enabling %ld byte huge pages;" - " must be a power of four.\n", ps); + pr_warn("Not enabling %ld byte huge pages; must be a power of four\n", + ps); return -EINVAL; } if (ps > 64*1024*1024*1024UL) { - pr_warn("Not enabling %ld MB huge pages;" - " largest legal value is 64 GB .\n", ps >> 20); + pr_warn("Not enabling %ld MB huge pages; largest legal value is 64 GB\n", + ps >> 20); return -EINVAL; } else if (ps >= PUD_SIZE) { static long hv_jpage_size; if (hv_jpage_size == 0) hv_jpage_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_JUMBO); if (hv_jpage_size != PUD_SIZE) { - pr_warn("Not enabling >= %ld MB huge pages:" - " hypervisor reports size %ld\n", + pr_warn("Not enabling >= %ld MB huge pages: hypervisor reports size %ld\n", PUD_SIZE >> 20, hv_jpage_size); return -EINVAL; } @@ -320,14 +319,13 @@ static __init int __setup_hugepagesz(unsigned long ps) int shift_val = log_ps - base_shift; if (huge_shift[level] != 0) { int old_shift = base_shift + huge_shift[level]; - pr_warn("Not enabling %ld MB huge pages;" - " already have size %ld MB.\n", + pr_warn("Not enabling %ld MB huge pages; already have size %ld MB\n", ps >> 20, (1UL << old_shift) >> 20); return -EINVAL; } if (hv_set_pte_super_shift(level, shift_val) != 0) { - pr_warn("Not enabling %ld MB huge pages;" - " no hypervisor support.\n", ps >> 20); + pr_warn("Not enabling %ld MB huge pages; no hypervisor support\n", + ps >> 20); return -EINVAL; } printk(KERN_DEBUG "Enabled %ld MB huge pages\n", ps >> 20); diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c index caa270165f86..be240cc4978d 100644 --- a/arch/tile/mm/init.c +++ b/arch/tile/mm/init.c @@ -357,11 +357,11 @@ static int __init setup_ktext(char *str) cpulist_scnprintf(buf, sizeof(buf), &ktext_mask); if (cpumask_weight(&ktext_mask) > 1) { ktext_small = 1; - pr_info("ktext: using caching neighborhood %s " - "with small pages\n", buf); + pr_info("ktext: using caching neighborhood %s with small pages\n", + buf); } else { pr_info("ktext: caching on cpu %s with one huge page\n", - buf); + buf); } } @@ -413,19 +413,16 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base) int rc, i; if (ktext_arg_seen && ktext_hash) { - pr_warning("warning: \"ktext\" boot argument ignored" - " if \"kcache_hash\" sets up text hash-for-home\n"); + pr_warn("warning: \"ktext\" boot argument ignored if \"kcache_hash\" sets up text hash-for-home\n"); ktext_small = 0; } if (kdata_arg_seen && kdata_hash) { - pr_warning("warning: \"kdata\" boot argument ignored" - " if \"kcache_hash\" sets up data hash-for-home\n"); + pr_warn("warning: \"kdata\" boot argument ignored if \"kcache_hash\" sets up data hash-for-home\n"); } if (kdata_huge && !hash_default) { - pr_warning("warning: disabling \"kdata=huge\"; requires" - " kcache_hash=all or =allbutstack\n"); + pr_warn("warning: disabling \"kdata=huge\"; requires kcache_hash=all or =allbutstack\n"); kdata_huge = 0; } @@ -470,8 +467,8 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base) pte[pte_ofs] = pfn_pte(pfn, prot); } else { if (kdata_huge) - printk(KERN_DEBUG "pre-shattered huge" - " page at %#lx\n", address); + printk(KERN_DEBUG "pre-shattered huge page at %#lx\n", + address); for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE; pfn++, pte_ofs++, address += PAGE_SIZE) { pgprot_t prot = init_pgprot(address); @@ -501,8 +498,8 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base) pr_info("ktext: not using unavailable cpus %s\n", buf); } if (cpumask_empty(&ktext_mask)) { - pr_warning("ktext: no valid cpus; caching on %d.\n", - 
smp_processor_id()); + pr_warn("ktext: no valid cpus; caching on %d\n", + smp_processor_id()); cpumask_copy(&ktext_mask, cpumask_of(smp_processor_id())); } @@ -798,11 +795,9 @@ void __init mem_init(void) #ifdef CONFIG_HIGHMEM /* check that fixmap and pkmap do not overlap */ if (PKMAP_ADDR(LAST_PKMAP-1) >= FIXADDR_START) { - pr_err("fixmap and kmap areas overlap" - " - this will crash\n"); + pr_err("fixmap and kmap areas overlap - this will crash\n"); pr_err("pkstart: %lxh pkend: %lxh fixstart %lxh\n", - PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP-1), - FIXADDR_START); + PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP-1), FIXADDR_START); BUG(); } #endif @@ -926,8 +921,7 @@ static void free_init_pages(char *what, unsigned long begin, unsigned long end) unsigned long addr = (unsigned long) begin; if (kdata_huge && !initfree) { - pr_warning("Warning: ignoring initfree=0:" - " incompatible with kdata=huge\n"); + pr_warn("Warning: ignoring initfree=0: incompatible with kdata=huge\n"); initfree = 1; } end = (end + PAGE_SIZE - 1) & PAGE_MASK; diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c index 5e86eac4bfae..7bf2491a9c1f 100644 --- a/arch/tile/mm/pgtable.c +++ b/arch/tile/mm/pgtable.c @@ -44,9 +44,7 @@ void show_mem(unsigned int filter) { struct zone *zone; - pr_err("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu" - " free:%lu\n slab:%lu mapped:%lu pagetables:%lu bounce:%lu" - " pagecache:%lu swap:%lu\n", + pr_err("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu free:%lu\n slab:%lu mapped:%lu pagetables:%lu bounce:%lu pagecache:%lu swap:%lu\n", (global_page_state(NR_ACTIVE_ANON) + global_page_state(NR_ACTIVE_FILE)), (global_page_state(NR_INACTIVE_ANON) + |