author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-06-12 01:50:01 +0200
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-06-12 01:50:01 +0200
commit | 7ec6131b55184084d091953fad9e5c785c5b500b (patch)
tree | 9c777700e2c237946be8fe775903168f11b77f29 /arch/tile
parent | Merge tag 'modules-next-for-linus' of git://git.kernel.org/pub/scm/linux/kern... (diff)
parent | arch: tile: kernel: unaligned.c: Cleaning up uninitialized variables (diff)
download | linux-7ec6131b55184084d091953fad9e5c785c5b500b.tar.xz linux-7ec6131b55184084d091953fad9e5c785c5b500b.zip
Merge git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile
Pull arch/tile changes from Chris Metcalf:
"These mostly just address smaller issues reported to me"
* git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile:
arch: tile: kernel: unaligned.c: Cleaning up uninitialized variables
drivers/tty/hvc/hvc_tile.c: use PTR_ERR_OR_ZERO
replace strict_strto* call with kstrto*
tile: Update comments for generic idle conversion
tile: cleanup the comment in init_pgprot
tile: use BOOTMEM_DEFAULT instead of magic number 0 for reserve_bootmem flags
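
Most of the parsing changes in this series follow the same strict_strto*-to-kstrto* pattern: the kstrto* helpers parse directly into a variable of the target type and return 0 on success, so the temporary long/unsigned long and the follow-up assignment disappear. Below is a minimal sketch of that pattern, modeled on the setup.c hunk further down but using a hypothetical example_mb boot parameter that is not part of this merge:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/printk.h>

static unsigned int example_mb;  /* hypothetical boot parameter, illustration only */

static int __init setup_example_mb(char *str)
{
        /* kstrtouint() writes the parsed value straight into example_mb
         * and returns 0 on success, so no temporary variable is needed. */
        if (str == NULL || kstrtouint(str, 0, &example_mb) != 0 ||
            example_mb > 3 * 1024)
                return -EINVAL;

        pr_info("Reserving %uMB for the example region\n", example_mb);
        return 0;
}
early_param("example_mb", setup_example_mb);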
Diffstat (limited to 'arch/tile')
-rw-r--r-- | arch/tile/include/asm/thread_info.h |  2
-rw-r--r-- | arch/tile/kernel/setup.c            | 12
-rw-r--r-- | arch/tile/kernel/signal.c           |  7
-rw-r--r-- | arch/tile/kernel/traps.c            |  5
-rw-r--r-- | arch/tile/kernel/unaligned.c        | 15
-rw-r--r-- | arch/tile/mm/init.c                 |  8
6 files changed, 17 insertions, 32 deletions
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h
index d767ff9f59b9..48e4fd0f38e4 100644
--- a/arch/tile/include/asm/thread_info.h
+++ b/arch/tile/include/asm/thread_info.h
@@ -94,7 +94,7 @@ register unsigned long stack_pointer __asm__("sp");
 /* Sit on a nap instruction until interrupted. */
 extern void smp_nap(void);
 
-/* Enable interrupts racelessly and nap forever: helper for cpu_idle(). */
+/* Enable interrupts racelessly and nap forever: helper for arch_cpu_idle(). */
 extern void _cpu_idle(void);
 
 #else /* __ASSEMBLY__ */
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 74c91729a62a..112ababa9e55 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -228,13 +228,10 @@ early_param("isolnodes", setup_isolnodes);
 #if defined(CONFIG_PCI) && !defined(__tilegx__)
 static int __init setup_pci_reserve(char* str)
 {
-        unsigned long mb;
-
-        if (str == NULL || strict_strtoul(str, 0, &mb) != 0 ||
-            mb > 3 * 1024)
+        if (str == NULL || kstrtouint(str, 0, &pci_reserve_mb) != 0 ||
+            pci_reserve_mb > 3 * 1024)
                 return -EINVAL;
 
-        pci_reserve_mb = mb;
         pr_info("Reserving %dMB for PCIE root complex mappings\n",
                 pci_reserve_mb);
         return 0;
@@ -691,7 +688,7 @@ static void __init setup_bootmem_allocator(void)
         /* Reserve any memory excluded by "memmap" arguments. */
         for (i = 0; i < memmap_nr; ++i) {
                 struct memmap_entry *m = &memmap_map[i];
-                reserve_bootmem(m->addr, m->size, 0);
+                reserve_bootmem(m->addr, m->size, BOOTMEM_DEFAULT);
         }
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -715,7 +712,8 @@ static void __init setup_bootmem_allocator(void)
 
 #ifdef CONFIG_KEXEC
         if (crashk_res.start != crashk_res.end)
-                reserve_bootmem(crashk_res.start, resource_size(&crashk_res), 0);
+                reserve_bootmem(crashk_res.start, resource_size(&crashk_res),
+                                BOOTMEM_DEFAULT);
 #endif
 }
 
diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c
index 2d1dbf38a9ab..d1d026f01267 100644
--- a/arch/tile/kernel/signal.c
+++ b/arch/tile/kernel/signal.c
@@ -321,14 +321,13 @@ int show_unhandled_signals = 1;
 
 static int __init crashinfo(char *str)
 {
-        unsigned long val;
         const char *word;
 
         if (*str == '\0')
-                val = 2;
-        else if (*str != '=' || strict_strtoul(++str, 0, &val) != 0)
+                show_unhandled_signals = 2;
+        else if (*str != '=' || kstrtoint(++str, 0, &show_unhandled_signals) != 0)
                 return 0;
-        show_unhandled_signals = val;
+
         switch (show_unhandled_signals) {
         case 0:
                 word = "No";
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c
index 6b603d556ca6..f3ceb6308e42 100644
--- a/arch/tile/kernel/traps.c
+++ b/arch/tile/kernel/traps.c
@@ -42,10 +42,9 @@ static int __init setup_unaligned_fixup(char *str)
          * will still parse the instruction, then fire a SIGBUS with
          * the correct address from inside the single_step code.
          */
-        long val;
-        if (strict_strtol(str, 0, &val) != 0)
+        if (kstrtoint(str, 0, &unaligned_fixup) != 0)
                 return 0;
-        unaligned_fixup = val;
+
         pr_info("Fixups for unaligned data accesses are %s\n",
                 unaligned_fixup >= 0 ?
                 (unaligned_fixup ? "enabled" : "disabled") :
diff --git a/arch/tile/kernel/unaligned.c b/arch/tile/kernel/unaligned.c
index b030b4e78845..c02ea2a45f67 100644
--- a/arch/tile/kernel/unaligned.c
+++ b/arch/tile/kernel/unaligned.c
@@ -182,18 +182,7 @@ static void find_regs(tilegx_bundle_bits bundle, uint64_t *rd, uint64_t *ra,
         int i;
         uint64_t reg;
         uint64_t reg_map = 0, alias_reg_map = 0, map;
-        bool alias;
-
-        *ra = -1;
-        *rb = -1;
-
-        if (rd)
-                *rd = -1;
-
-        *clob1 = -1;
-        *clob2 = -1;
-        *clob3 = -1;
-        alias = false;
+        bool alias = false;
 
         /*
          * Parse fault bundle, find potential used registers and mark
@@ -569,7 +558,7 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle,
         tilegx_bundle_bits bundle_2 = 0;
         /* If bundle_2_enable = false, bundle_2 is fnop/nop operation. */
         bool bundle_2_enable = true;
-        uint64_t ra, rb, rd = -1, clob1, clob2, clob3;
+        uint64_t ra = -1, rb = -1, rd = -1, clob1 = -1, clob2 = -1, clob3 = -1;
         /*
          * Indicate if the unalign access
          * instruction's registers hit with
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index 0fa1acfac79a..bfb3127b4df9 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -273,9 +273,9 @@ static pgprot_t __init init_pgprot(ulong address)
         /*
          * Otherwise we just hand out consecutive cpus. To avoid
          * requiring this function to hold state, we just walk forward from
-         * _sdata by PAGE_SIZE, skipping the readonly and init data, to reach
-         * the requested address, while walking cpu home around kdata_mask.
-         * This is typically no more than a dozen or so iterations.
+         * __end_rodata by PAGE_SIZE, skipping the readonly and init data, to
+         * reach the requested address, while walking cpu home around
+         * kdata_mask. This is typically no more than a dozen or so iterations.
          */
         page = (((ulong)__end_rodata) + PAGE_SIZE - 1) & PAGE_MASK;
         BUG_ON(address < page || address >= (ulong)_end);
@@ -912,7 +912,7 @@ static long __write_once initfree = 1;
 static int __init set_initfree(char *str)
 {
         long val;
-        if (strict_strtol(str, 0, &val) == 0) {
+        if (kstrtol(str, 0, &val) == 0) {
                 initfree = val;
                 pr_info("initfree: %s free init pages\n",
                         initfree ? "will" : "won't");