author    | Linus Torvalds <torvalds@linux-foundation.org> | 2015-07-02 02:47:51 +0200
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-07-02 02:47:51 +0200
commit    | 2d01eedf1d14432f4db5388a49dc5596a8c5bd02 (patch)
tree      | 646525acc0475b2899827c1bfbd25f05ec1b8092
parent    | Merge branch 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/... (diff)
parent    | genalloc: rename of_get_named_gen_pool() to of_gen_pool_get() (diff)
download  | linux-2d01eedf1d14432f4db5388a49dc5596a8c5bd02.tar.xz
          | linux-2d01eedf1d14432f4db5388a49dc5596a8c5bd02.zip
Merge branch 'akpm' (patches from Andrew)
Merge third patchbomb from Andrew Morton:
- the rest of MM
- scripts/gdb updates
- ipc/ updates
- lib/ updates
- MAINTAINERS updates
- various other misc things
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (67 commits)
genalloc: rename of_get_named_gen_pool() to of_gen_pool_get()
genalloc: rename dev_get_gen_pool() to gen_pool_get()
x86: opt into HAVE_COPY_THREAD_TLS, for both 32-bit and 64-bit
MAINTAINERS: add zpool
MAINTAINERS: BCACHE: Kent Overstreet has changed email address
MAINTAINERS: move Jens Osterkamp to CREDITS
MAINTAINERS: remove unused nbd.h pattern
MAINTAINERS: update brcm gpio filename pattern
MAINTAINERS: update brcm dts pattern
MAINTAINERS: update sound soc intel patterns
MAINTAINERS: remove website for paride
MAINTAINERS: update Emulex ocrdma email addresses
bcache: use kvfree() in various places
libcxgbi: use kvfree() in cxgbi_free_big_mem()
target: use kvfree() in session alloc and free
IB/ehca: use kvfree() in ipz_queue_{cd}tor()
drm/nouveau/gem: use kvfree() in u_free()
drm: use kvfree() in drm_free_large()
cxgb4: use kvfree() in t4_free_mem()
cxgb3: use kvfree() in cxgb_free_mem()
...
74 files changed, 894 insertions, 339 deletions
@@ -2740,6 +2740,10 @@ S: C/ Mieses 20, 9-B
 S: Valladolid 47009
 S: Spain

+N: Jens Osterkamp
+E: jens@de.ibm.com
+D: Maintainer of Spidernet network driver for Cell
+
 N: Gadi Oxman
 E: gadio@netvision.net.il
 D: Original author and maintainer of IDE/ATAPI floppy/tape drivers
diff --git a/MAINTAINERS b/MAINTAINERS
index ab6fb58b3873..058b0fbc52ff 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2026,10 +2026,10 @@ S: Maintained
 F: drivers/net/hamradio/baycom*

 BCACHE (BLOCK LAYER CACHE)
-M: Kent Overstreet <kmo@daterainc.com>
+M: Kent Overstreet <kent.overstreet@gmail.com>
 L: linux-bcache@vger.kernel.org
 W: http://bcache.evilpiepirate.org
-S: Maintained:
+S: Maintained
 F: drivers/md/bcache/

 BDISP ST MEDIA DRIVER
@@ -2280,7 +2280,7 @@ S: Maintained
 F: arch/mips/bmips/*
 F: arch/mips/include/asm/mach-bmips/*
 F: arch/mips/kernel/*bmips*
-F: arch/mips/boot/dts/bcm*.dts*
+F: arch/mips/boot/dts/brcm/bcm*.dts*
 F: drivers/irqchip/irq-bcm7*
 F: drivers/irqchip/irq-brcmstb*

@@ -2339,7 +2339,7 @@ M: Ray Jui <rjui@broadcom.com>
 L: bcm-kernel-feedback-list@broadcom.com
 S: Supported
 F: drivers/gpio/gpio-bcm-kona.c
-F: Documentation/devicetree/bindings/gpio/gpio-bcm-kona.txt
+F: Documentation/devicetree/bindings/gpio/brcm,kona-gpio.txt

 BROADCOM NVRAM DRIVER
 M: Rafał Miłecki <zajec5@gmail.com>
@@ -5285,11 +5285,10 @@ INTEL ASoC BDW/HSW DRIVERS
 M: Jie Yang <yang.jie@linux.intel.com>
 L: alsa-devel@alsa-project.org (moderated for non-subscribers)
 S: Supported
-F: sound/soc/intel/sst-haswell*
-F: sound/soc/intel/sst-dsp*
-F: sound/soc/intel/sst-firmware.c
-F: sound/soc/intel/broadwell.c
-F: sound/soc/intel/haswell.c
+F: sound/soc/intel/common/sst-dsp*
+F: sound/soc/intel/common/sst-firmware.c
+F: sound/soc/intel/boards/broadwell.c
+F: sound/soc/intel/haswell/

 INTEL C600 SERIES SAS CONTROLLER DRIVER
 M: Intel SCU Linux support <intel-linux-scu@intel.com>
@@ -7019,7 +7018,6 @@ L: nbd-general@lists.sourceforge.net
 T: git git://git.pengutronix.de/git/mpa/linux-nbd.git
 F: Documentation/blockdev/nbd.txt
 F: drivers/block/nbd.c
-F: include/linux/nbd.h
 F: include/uapi/linux/nbd.h

 NETWORK DROP MONITOR
@@ -7647,7 +7645,6 @@ F: arch/*/include/asm/paravirt.h
 PARIDE DRIVERS FOR PARALLEL PORT IDE DEVICES
 M: Tim Waugh <tim@cyberelk.net>
 L: linux-parport@lists.infradead.org (subscribers-only)
-W: http://www.torque.net/linux-pp.html
 S: Maintained
 F: Documentation/blockdev/paride.txt
 F: drivers/block/paride/
@@ -9091,9 +9088,9 @@ S: Supported
 F: drivers/net/ethernet/emulex/benet/

 EMULEX ONECONNECT ROCE DRIVER
-M: Selvin Xavier <selvin.xavier@emulex.com>
-M: Devesh Sharma <devesh.sharma@emulex.com>
-M: Mitesh Ahuja <mitesh.ahuja@emulex.com>
+M: Selvin Xavier <selvin.xavier@avagotech.com>
+M: Devesh Sharma <devesh.sharma@avagotech.com>
+M: Mitesh Ahuja <mitesh.ahuja@avagotech.com>
 L: linux-rdma@vger.kernel.org
 W: http://www.emulex.com
 S: Supported
@@ -9593,7 +9590,6 @@ F: include/uapi/linux/spi/

 SPIDERNET NETWORK DRIVER for CELL
 M: Ishizaki Kou <kou.ishizaki@toshiba.co.jp>
-M: Jens Osterkamp <jens@de.ibm.com>
 L: netdev@vger.kernel.org
 S: Supported
 F: Documentation/networking/spider_net.txt
@@ -11354,6 +11350,13 @@ L: zd1211-devs@lists.sourceforge.net (subscribers-only)
 S: Maintained
 F: drivers/net/wireless/zd1211rw/

+ZPOOL COMPRESSED PAGE STORAGE API
+M: Dan Streetman <ddstreet@ieee.org>
+L: linux-mm@kvack.org
+S: Maintained
+F: mm/zpool.c
+F: include/linux/zpool.h
+
 ZR36067 VIDEO FOR LINUX DRIVER
 L: mjpeg-users@lists.sourceforge.net
 L: linux-media@vger.kernel.org
diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h
index fd6cdb56d4fd..2d28ba939d8e 100644
--- a/arch/arc/include/asm/dma-mapping.h
+++ b/arch/arc/include/asm/dma-mapping.h
@@ -157,22 +157,24 @@ dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
 }

 static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nelems,
 		    enum dma_data_direction dir)
 {
 	int i;
+	struct scatterlist *sg;

-	for (i = 0; i < nelems; i++, sg++)
+	for_each_sg(sglist, sg, nelems, i)
 		_dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
 }

 static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
-		       enum dma_data_direction dir)
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
+		       int nelems, enum dma_data_direction dir)
 {
 	int i;
+	struct scatterlist *sg;

-	for (i = 0; i < nelems; i++, sg++)
+	for_each_sg(sglist, sg, nelems, i)
 		_dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
 }
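The ARC hunk above replaces manual `sg++` iteration with `for_each_sg()`, which follows chained scatterlists through `sg_next()` instead of assuming the entries form a flat array. A minimal sketch of the pattern, using made-up driver code rather than anything from this commit:

```c
#include <linux/printk.h>
#include <linux/scatterlist.h>

/*
 * Walk every entry of a possibly chained scatterlist. Plain "sg++"
 * only works for a flat array; for_each_sg() also follows chain
 * entries into the next chunk of the list.
 */
static void example_sync_all(struct scatterlist *sglist, int nelems)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sglist, sg, nelems, i)
		pr_debug("entry %d: vaddr=%p len=%u\n",
			 i, sg_virt(sg), sg->length);
}
```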
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 1e184767c3be..e24df77abd79 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -369,7 +369,7 @@ static void __init at91_pm_sram_init(void)
 		return;
 	}

-	sram_pool = dev_get_gen_pool(&pdev->dev);
+	sram_pool = gen_pool_get(&pdev->dev);
 	if (!sram_pool) {
 		pr_warn("%s: sram pool unavailable!\n", __func__);
 		return;
diff --git a/arch/arm/mach-imx/pm-imx5.c b/arch/arm/mach-imx/pm-imx5.c
index 0309ccda36a9..1885676c23c0 100644
--- a/arch/arm/mach-imx/pm-imx5.c
+++ b/arch/arm/mach-imx/pm-imx5.c
@@ -297,7 +297,7 @@ static int __init imx_suspend_alloc_ocram(
 		goto put_node;
 	}

-	ocram_pool = dev_get_gen_pool(&pdev->dev);
+	ocram_pool = gen_pool_get(&pdev->dev);
 	if (!ocram_pool) {
 		pr_warn("%s: ocram pool unavailable!\n", __func__);
 		ret = -ENODEV;
diff --git a/arch/arm/mach-imx/pm-imx6.c b/arch/arm/mach-imx/pm-imx6.c
index b01650d94f91..93ecf559d06d 100644
--- a/arch/arm/mach-imx/pm-imx6.c
+++ b/arch/arm/mach-imx/pm-imx6.c
@@ -451,7 +451,7 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata)
 		goto put_node;
 	}

-	ocram_pool = dev_get_gen_pool(&pdev->dev);
+	ocram_pool = gen_pool_get(&pdev->dev);
 	if (!ocram_pool) {
 		pr_warn("%s: ocram pool unavailable!\n", __func__);
 		ret = -ENODEV;
diff --git a/arch/arm/mach-socfpga/pm.c b/arch/arm/mach-socfpga/pm.c
index 1ed89fc2b7a8..6a4199f2bffb 100644
--- a/arch/arm/mach-socfpga/pm.c
+++ b/arch/arm/mach-socfpga/pm.c
@@ -56,7 +56,7 @@ static int socfpga_setup_ocram_self_refresh(void)
 		goto put_node;
 	}

-	ocram_pool = dev_get_gen_pool(&pdev->dev);
+	ocram_pool = gen_pool_get(&pdev->dev);
 	if (!ocram_pool) {
 		pr_warn("%s: ocram pool unavailable!\n", __func__);
 		ret = -ENODEV;
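The four ARM SoC hunks above are callers of the renamed `dev_get_gen_pool()`. As a hedged sketch, a typical consumer of the renamed API looks like this (the function and the `__iomem` cast are illustrative, not from the tree):

```c
#include <linux/genalloc.h>
#include <linux/platform_device.h>

/*
 * Look up the gen_pool that an sram driver registered against the
 * device, then carve out a chunk and report its physical address.
 */
static void __iomem *example_claim_sram(struct platform_device *pdev,
					size_t size, phys_addr_t *phys)
{
	struct gen_pool *pool = gen_pool_get(&pdev->dev);  /* was dev_get_gen_pool() */
	unsigned long vaddr;

	if (!pool)
		return NULL;

	vaddr = gen_pool_alloc(pool, size);
	if (!vaddr)
		return NULL;

	*phys = gen_pool_virt_to_phys(pool, vaddr);
	return (void __iomem *)vaddr;
}
```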
diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c
index ea21d4cad540..aa19b7ac8222 100644
--- a/arch/ia64/mm/numa.c
+++ b/arch/ia64/mm/numa.c
@@ -58,27 +58,22 @@ paddr_to_nid(unsigned long paddr)
  * SPARSEMEM to allocate the SPARSEMEM sectionmap on the NUMA node where
  * the section resides.
  */
-int __meminit __early_pfn_to_nid(unsigned long pfn)
+int __meminit __early_pfn_to_nid(unsigned long pfn,
+					struct mminit_pfnnid_cache *state)
 {
 	int i, section = pfn >> PFN_SECTION_SHIFT, ssec, esec;

-	/*
-	 * NOTE: The following SMP-unsafe globals are only used early in boot
-	 * when the kernel is running single-threaded.
-	 */
-	static int __meminitdata last_ssec, last_esec;
-	static int __meminitdata last_nid;
-
-	if (section >= last_ssec && section < last_esec)
-		return last_nid;
+	if (section >= state->last_start && section < state->last_end)
+		return state->last_nid;

 	for (i = 0; i < num_node_memblks; i++) {
 		ssec = node_memblk[i].start_paddr >> PA_SECTION_SHIFT;
 		esec = (node_memblk[i].start_paddr + node_memblk[i].size +
 			((1L << PA_SECTION_SHIFT) - 1)) >> PA_SECTION_SHIFT;
 		if (section >= ssec && section < esec) {
-			last_ssec = ssec;
-			last_esec = esec;
-			last_nid = node_memblk[i].nid;
+			state->last_start = ssec;
+			state->last_end = esec;
+			state->last_nid = node_memblk[i].nid;
 			return node_memblk[i].nid;
 		}
 	}
diff --git a/arch/unicore32/kernel/fpu-ucf64.c b/arch/unicore32/kernel/fpu-ucf64.c
index 282a60ac82ba..a53343a90ca2 100644
--- a/arch/unicore32/kernel/fpu-ucf64.c
+++ b/arch/unicore32/kernel/fpu-ucf64.c
@@ -90,8 +90,8 @@ void ucf64_exchandler(u32 inst, u32 fpexc, struct pt_regs *regs)
 		tmp &= ~(FPSCR_CON);
 		exc &= ~(FPSCR_CMPINSTR_BIT | FPSCR_CON);
 	} else {
-		pr_debug(KERN_ERR "UniCore-F64 Error: unhandled exceptions\n");
-		pr_debug(KERN_ERR "UniCore-F64 FPSCR 0x%08x INST 0x%08x\n",
+		pr_debug("UniCore-F64 Error: unhandled exceptions\n");
+		pr_debug("UniCore-F64 FPSCR 0x%08x INST 0x%08x\n",
 			cff(FPSCR), inst);

 		ucf64_raise_sigfpe(0, regs);
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index d05a42357ef0..55bced17dc95 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -34,6 +34,7 @@ config X86
 	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select ARCH_MIGHT_HAVE_PC_SERIO
 	select ARCH_SUPPORTS_ATOMIC_RMW
+	select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
 	select ARCH_SUPPORTS_INT128 if X86_64
 	select ARCH_SUPPORTS_NUMA_BALANCING if X86_64
 	select ARCH_USE_BUILTIN_BSWAP
@@ -87,6 +88,7 @@ config X86
 	select HAVE_CMPXCHG_DOUBLE
 	select HAVE_CMPXCHG_LOCAL
 	select HAVE_CONTEXT_TRACKING if X86_64
+	select HAVE_COPY_THREAD_TLS
 	select HAVE_C_RECORDMCOUNT
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_DEBUG_STACKOVERFLOW
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
index ca05f86481aa..ca83f7ac388b 100644
--- a/arch/x86/kernel/kexec-bzimage64.c
+++ b/arch/x86/kernel/kexec-bzimage64.c
@@ -72,15 +72,16 @@ static int setup_cmdline(struct kimage *image, struct boot_params *params,
 			 unsigned long cmdline_len)
 {
 	char *cmdline_ptr = ((char *)params) + cmdline_offset;
-	unsigned long cmdline_ptr_phys, len;
+	unsigned long cmdline_ptr_phys, len = 0;
 	uint32_t cmdline_low_32, cmdline_ext_32;

-	memcpy(cmdline_ptr, cmdline, cmdline_len);
 	if (image->type == KEXEC_TYPE_CRASH) {
-		len = sprintf(cmdline_ptr + cmdline_len - 1,
-			" elfcorehdr=0x%lx", image->arch.elf_load_addr);
-		cmdline_len += len;
+		len = sprintf(cmdline_ptr,
+			"elfcorehdr=0x%lx ", image->arch.elf_load_addr);
 	}
+	memcpy(cmdline_ptr + len, cmdline, cmdline_len);
+	cmdline_len += len;
+
 	cmdline_ptr[cmdline_len - 1] = '\0';

 	pr_debug("Final command line is: %s\n", cmdline_ptr);
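The kexec-bzimage64 fix reorders command-line assembly: `elfcorehdr=` is written first, so the user command line copied after it keeps its own NUL terminator; the old `sprintf()` at `cmdline_ptr + cmdline_len - 1` overwrote that NUL and appended past the end. A standalone sketch of the new ordering, with invented values purely for illustration:

```c
#include <stdio.h>
#include <string.h>

/*
 * Sketch of the new ordering in setup_cmdline(): the crash-specific
 * parameter goes first, the user command line (cmdline_len bytes,
 * including its NUL) is copied after it, and the result is explicitly
 * NUL-terminated.
 */
int main(void)
{
	char buf[256];
	const char *user_cmdline = "root=/dev/sda1 ro";	/* made-up example */
	unsigned long cmdline_len = strlen(user_cmdline) + 1;
	unsigned long len = 0;
	int is_crash_kernel = 1;

	if (is_crash_kernel)
		len = sprintf(buf, "elfcorehdr=0x%lx ", 0x7f000000UL);

	memcpy(buf + len, user_cmdline, cmdline_len);
	cmdline_len += len;
	buf[cmdline_len - 1] = '\0';

	printf("Final command line is: %s\n", buf);
	return 0;
}
```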
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index c09c99ccf3e3..f73c962fe636 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -128,8 +128,8 @@ void release_thread(struct task_struct *dead_task)
 	release_vm86_irqs(dead_task);
 }

-int copy_thread(unsigned long clone_flags, unsigned long sp,
-	unsigned long arg, struct task_struct *p)
+int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
+	unsigned long arg, struct task_struct *p, unsigned long tls)
 {
 	struct pt_regs *childregs = task_pt_regs(p);
 	struct task_struct *tsk;
@@ -184,7 +184,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 	 */
 	if (clone_flags & CLONE_SETTLS)
 		err = do_set_thread_area(p, -1,
-			(struct user_desc __user *)childregs->si, 0);
+			(struct user_desc __user *)tls, 0);

 	if (err && p->thread.io_bitmap_ptr) {
 		kfree(p->thread.io_bitmap_ptr);
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 843f92e4c711..71d7849a07f7 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -150,8 +150,8 @@ static inline u32 read_32bit_tls(struct task_struct *t, int tls)
 	return get_desc_base(&t->thread.tls_array[tls]);
 }

-int copy_thread(unsigned long clone_flags, unsigned long sp,
-		unsigned long arg, struct task_struct *p)
+int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
+		unsigned long arg, struct task_struct *p, unsigned long tls)
 {
 	int err;
 	struct pt_regs *childregs;
@@ -207,10 +207,10 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 #ifdef CONFIG_IA32_EMULATION
 		if (is_ia32_task())
 			err = do_set_thread_area(p, -1,
-				(struct user_desc __user *)childregs->si, 0);
+				(struct user_desc __user *)tls, 0);
 		else
 #endif
-			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
+			err = do_arch_prctl(p, ARCH_SET_FS, tls);
 		if (err)
 			goto out;
 	}
diff --git a/drivers/base/node.c b/drivers/base/node.c
index a2aa65b4215d..31df474d72f4 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -359,12 +359,16 @@ int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
 #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
 #define page_initialized(page)  (page->lru.next)

-static int get_nid_for_pfn(unsigned long pfn)
+static int __init_refok get_nid_for_pfn(unsigned long pfn)
 {
 	struct page *page;

 	if (!pfn_valid_within(pfn))
 		return -1;
+#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+	if (system_state == SYSTEM_BOOTING)
+		return early_pfn_to_nid(pfn);
+#endif
 	page = pfn_to_page(pfn);
 	if (!page_initialized(page))
 		return -1;
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index a432633bced4..1c6f98dd88f4 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -321,9 +321,8 @@ static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
 	const char *res_name = "sram";
 	struct resource *res;

-	engine->pool = of_get_named_gen_pool(cesa->dev->of_node,
-					     "marvell,crypto-srams",
-					     idx);
+	engine->pool = of_gen_pool_get(cesa->dev->of_node,
+				       "marvell,crypto-srams", idx);
 	if (engine->pool) {
 		engine->sram = gen_pool_dma_alloc(engine->pool,
 						  cesa->sram_size,
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 449e785def17..e683761e0f8f 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -657,7 +657,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
 	INIT_LIST_HEAD(&tdev->device.channels);

 	if (pdev->dev.of_node)
-		pool = of_get_named_gen_pool(pdev->dev.of_node, "asram", 0);
+		pool = of_gen_pool_get(pdev->dev.of_node, "asram", 0);
 	else
 		pool = sram_get_gpool("asram");
 	if (!pool) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 0e690bf19fc9..af1ee517f372 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -555,10 +555,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 static inline void
 u_free(void *addr)
 {
-	if (!is_vmalloc_addr(addr))
-		kfree(addr);
-	else
-		vfree(addr);
+	kvfree(addr);
 }

 static inline void *
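The `u_free()` hunk above is one of many in this series that collapse the open-coded `is_vmalloc_addr()` test into `kvfree()`, which frees memory obtained from either `kmalloc()` or `vmalloc()`. A sketch of the alloc/free pairing this enables (the helper names are made up):

```c
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/*
 * Try physically contiguous memory first and fall back to vmalloc()
 * for large sizes; kvfree() handles either kind, which is why the
 * open-coded is_vmalloc_addr() checks in this series can go away.
 */
static void *example_alloc_big(size_t size)
{
	void *p = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);

	return p ? p : vmalloc(size);
}

static void example_free_big(void *p)
{
	kvfree(p);	/* frees both kmalloc() and vmalloc() memory */
}
```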
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.c b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
index 8d594517cd29..7ffc748cb973 100644
--- a/drivers/infiniband/hw/ehca/ipz_pt_fn.c
+++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
@@ -245,10 +245,7 @@ int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
 ipz_queue_ctor_exit0:
 	ehca_gen_err("Couldn't alloc pages queue=%p "
 		 "nr_of_pages=%x",  queue, nr_of_pages);
-	if (is_vmalloc_addr(queue->queue_pages))
-		vfree(queue->queue_pages);
-	else
-		kfree(queue->queue_pages);
+	kvfree(queue->queue_pages);

 	return 0;
 }
@@ -270,10 +267,7 @@ int ipz_queue_dtor(struct ehca_pd *pd, struct ipz_queue *queue)
 			free_page((unsigned long)queue->queue_pages[i]);
 	}

-	if (is_vmalloc_addr(queue->queue_pages))
-		vfree(queue->queue_pages);
-	else
-		kfree(queue->queue_pages);
+	kvfree(queue->queue_pages);

 	return 1;
 }
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index fe080ad0e558..ce64fc851251 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -157,7 +157,7 @@ int bch_journal_read(struct cache_set *c, struct list_head *list)

 	for_each_cache(ca, c, iter) {
 		struct journal_device *ja = &ca->journal;
-		unsigned long bitmap[SB_JOURNAL_BUCKETS / BITS_PER_LONG];
+		DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
 		unsigned i, l, r, m;
 		uint64_t seq;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 4dd2bb7167f0..94980bfca434 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -760,14 +760,8 @@ static void bcache_device_free(struct bcache_device *d)
 		bio_split_pool_free(&d->bio_split_hook);
 	if (d->bio_split)
 		bioset_free(d->bio_split);
-	if (is_vmalloc_addr(d->full_dirty_stripes))
-		vfree(d->full_dirty_stripes);
-	else
-		kfree(d->full_dirty_stripes);
-	if (is_vmalloc_addr(d->stripe_sectors_dirty))
-		vfree(d->stripe_sectors_dirty);
-	else
-		kfree(d->stripe_sectors_dirty);
+	kvfree(d->full_dirty_stripes);
+	kvfree(d->stripe_sectors_dirty);

 	closure_debug_destroy(&d->cl);
 }
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index 98df7572b5f7..1d04c4859c70 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -52,10 +52,7 @@ struct closure;

 #define free_heap(heap)						\
 do {								\
-	if (is_vmalloc_addr((heap)->data))			\
-		vfree((heap)->data);				\
-	else							\
-		kfree((heap)->data);				\
+	kvfree((heap)->data);					\
 	(heap)->data = NULL;					\
 } while (0)

@@ -163,10 +160,7 @@ do {								\

 #define free_fifo(fifo)						\
 do {								\
-	if (is_vmalloc_addr((fifo)->data))			\
-		vfree((fifo)->data);				\
-	else							\
-		kfree((fifo)->data);				\
+	kvfree((fifo)->data);					\
 	(fifo)->data = NULL;					\
 } while (0)
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index 6d6e0ca91fb4..58f65486de33 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -2155,9 +2155,9 @@ static int coda_probe(struct platform_device *pdev)
 	}

 	/* Get IRAM pool from device tree or platform data */
-	pool = of_get_named_gen_pool(np, "iram", 0);
+	pool = of_gen_pool_get(np, "iram", 0);
 	if (!pool && pdata)
-		pool = dev_get_gen_pool(pdata->iram_dev);
+		pool = gen_pool_get(pdata->iram_dev);
 	if (!pool) {
 		dev_err(&pdev->dev, "iram pool not available\n");
 		return -ENOMEM;
 	}
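The bcache journal hunk matters because `SB_JOURNAL_BUCKETS / BITS_PER_LONG` rounds down, underallocating the bitmap whenever the bit count is not a multiple of `BITS_PER_LONG`; `DECLARE_BITMAP()` uses `BITS_TO_LONGS()`, which rounds up. A sketch contrasting the two (the bit count is illustrative):

```c
#include <linux/bitmap.h>
#include <linux/bitops.h>

#define EXAMPLE_NBITS 100	/* illustrative; not a multiple of BITS_PER_LONG */

static void example_bitmap_sizing(void)
{
	/*
	 * Buggy pattern: 100 / 64 == 1 longword on 64-bit, i.e. only 64
	 * usable bits -- setting a higher bit corrupts the stack.
	 */
	unsigned long too_small[EXAMPLE_NBITS / BITS_PER_LONG];

	/*
	 * DECLARE_BITMAP() expands to
	 * unsigned long ok[BITS_TO_LONGS(100)], rounding up to 2 words.
	 */
	DECLARE_BITMAP(ok, EXAMPLE_NBITS);

	bitmap_zero(ok, EXAMPLE_NBITS);
	set_bit(EXAMPLE_NBITS - 1, ok);	/* safe with the rounded-up size */
	(void)too_small;
}
```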
diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c
index aeabaa5aedf7..48db922075e2 100644
--- a/drivers/memstick/host/jmb38x_ms.c
+++ b/drivers/memstick/host/jmb38x_ms.c
@@ -419,10 +419,10 @@ static int jmb38x_ms_issue_cmd(struct memstick_host *msh)
 	}

 	if (host->cmd_flags & DMA_DATA) {
-		if (1 != pci_map_sg(host->chip->pdev, &host->req->sg, 1,
+		if (1 != dma_map_sg(&host->chip->pdev->dev, &host->req->sg, 1,
 				    host->req->data_dir == READ
-				    ? PCI_DMA_FROMDEVICE
-				    : PCI_DMA_TODEVICE)) {
+				    ? DMA_FROM_DEVICE
+				    : DMA_TO_DEVICE)) {
 			host->req->error = -ENOMEM;
 			return host->req->error;
 		}
@@ -487,9 +487,9 @@ static void jmb38x_ms_complete_cmd(struct memstick_host *msh, int last)
 	writel(0, host->addr + DMA_CONTROL);

 	if (host->cmd_flags & DMA_DATA) {
-		pci_unmap_sg(host->chip->pdev, &host->req->sg, 1,
+		dma_unmap_sg(&host->chip->pdev->dev, &host->req->sg, 1,
 			     host->req->data_dir == READ
-			     ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
+			     ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
 	} else {
 		t_val = readl(host->addr + INT_STATUS_ENABLE);
 		if (host->req->data_dir == READ)
@@ -925,7 +925,7 @@ static int jmb38x_ms_probe(struct pci_dev *pdev,
 	int pci_dev_busy = 0;
 	int rc, cnt;

-	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+	rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 	if (rc)
 		return rc;
diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c
index e2a4f5f415b2..ef09ba0289d7 100644
--- a/drivers/memstick/host/r592.c
+++ b/drivers/memstick/host/r592.c
@@ -754,7 +754,7 @@ static int r592_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto error2;

 	pci_set_master(pdev);
-	error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+	error = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 	if (error)
 		goto error3;

@@ -787,8 +787,8 @@ static int r592_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	}

 	/* This is just a precation, so don't fail */
-	dev->dummy_dma_page = pci_alloc_consistent(pdev, PAGE_SIZE,
-		&dev->dummy_dma_page_physical_address);
+	dev->dummy_dma_page = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+		&dev->dummy_dma_page_physical_address, GFP_KERNEL);
 	r592_stop_dma(dev , 0);

 	if (request_irq(dev->irq, &r592_irq, IRQF_SHARED,
@@ -805,7 +805,7 @@ error7:
 	free_irq(dev->irq, dev);
 error6:
 	if (dev->dummy_dma_page)
-		pci_free_consistent(pdev, PAGE_SIZE, dev->dummy_dma_page,
+		dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->dummy_dma_page,
 			dev->dummy_dma_page_physical_address);

 	kthread_stop(dev->io_thread);
@@ -845,7 +845,7 @@ static void r592_remove(struct pci_dev *pdev)
 	memstick_free_host(dev->host);

 	if (dev->dummy_dma_page)
-		pci_free_consistent(pdev, PAGE_SIZE, dev->dummy_dma_page,
+		dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->dummy_dma_page,
 			dev->dummy_dma_page_physical_address);
 }
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index b0cbb2b7fd48..76684dcb874c 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -1169,10 +1169,7 @@ void *cxgb_alloc_mem(unsigned long size)
  */
 void cxgb_free_mem(void *addr)
 {
-	if (is_vmalloc_addr(addr))
-		vfree(addr);
-	else
-		kfree(addr);
+	kvfree(addr);
 }

 /*
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index c64b5a99bfef..351f3b1bf800 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -1150,10 +1150,7 @@ void *t4_alloc_mem(size_t size)
  */
 void t4_free_mem(void *addr)
 {
-	if (is_vmalloc_addr(addr))
-		vfree(addr);
-	else
-		kfree(addr);
+	kvfree(addr);
 }

 /* Send a Work Request to write the filter at a specified index.  We construct
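The memstick hunks are mechanical conversions from the legacy `pci_*` DMA wrappers to the generic DMA API operating on `&pdev->dev`. A hedged sketch of the correspondence, as hypothetical driver code:

```c
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>

/*
 * The pci_* DMA wrappers map 1:1 onto the generic API used above:
 *   pci_set_dma_mask(pdev, m)     -> dma_set_mask(&pdev->dev, m)
 *   pci_map_sg / pci_unmap_sg     -> dma_map_sg / dma_unmap_sg
 *   pci_alloc/free_consistent     -> dma_alloc/free_coherent (+ gfp flag)
 *   PCI_DMA_FROMDEVICE/TODEVICE   -> DMA_FROM_DEVICE / DMA_TO_DEVICE
 */
static int example_dma_setup(struct pci_dev *pdev, struct scatterlist *sg,
			     int nents, bool from_device)
{
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
		return -EIO;

	if (!dma_map_sg(&pdev->dev, sg, nents,
			from_device ? DMA_FROM_DEVICE : DMA_TO_DEVICE))
		return -ENOMEM;

	return 0;
}
```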
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index b3e5bd1d5d9c..9842301f7980 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -685,10 +685,7 @@ static inline void *cxgbi_alloc_big_mem(unsigned int size,

 static inline void cxgbi_free_big_mem(void *addr)
 {
-	if (is_vmalloc_addr(addr))
-		vfree(addr);
-	else
-		kfree(addr);
+	kvfree(addr);
 }

 static inline void cxgbi_set_iscsi_ipv4(struct cxgbi_hba *chba, __be32 ipaddr)
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 1f8e2dc9c616..30268bb2ddb6 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -2363,17 +2363,13 @@ do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num, bool do_write)
 	u64 block, rest = 0;
 	struct scsi_data_buffer *sdb;
 	enum dma_data_direction dir;
-	size_t (*func)(struct scatterlist *, unsigned int, void *, size_t,
-		       off_t);

 	if (do_write) {
 		sdb = scsi_out(scmd);
 		dir = DMA_TO_DEVICE;
-		func = sg_pcopy_to_buffer;
 	} else {
 		sdb = scsi_in(scmd);
 		dir = DMA_FROM_DEVICE;
-		func = sg_pcopy_from_buffer;
 	}

 	if (!sdb->length)
@@ -2385,16 +2381,16 @@ do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num, bool do_write)
 	if (block + num > sdebug_store_sectors)
 		rest = block + num - sdebug_store_sectors;

-	ret = func(sdb->table.sgl, sdb->table.nents,
+	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
 		   fake_storep + (block * scsi_debug_sector_size),
-		   (num - rest) * scsi_debug_sector_size, 0);
+		   (num - rest) * scsi_debug_sector_size, 0, do_write);
 	if (ret != (num - rest) * scsi_debug_sector_size)
 		return ret;

 	if (rest) {
-		ret += func(sdb->table.sgl, sdb->table.nents,
+		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
 			    fake_storep, rest * scsi_debug_sector_size,
-			    (num - rest) * scsi_debug_sector_size);
+			    (num - rest) * scsi_debug_sector_size, do_write);
 	}

 	return ret;
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 0b4e24217564..cd3bfc16d25f 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -279,10 +279,7 @@ int transport_alloc_session_tags(struct se_session *se_sess,
 	if (rc < 0) {
 		pr_err("Unable to init se_sess->sess_tag_pool,"
			" tag_num: %u\n", tag_num);
-		if (is_vmalloc_addr(se_sess->sess_cmd_map))
-			vfree(se_sess->sess_cmd_map);
-		else
-			kfree(se_sess->sess_cmd_map);
+		kvfree(se_sess->sess_cmd_map);
 		se_sess->sess_cmd_map = NULL;
 		return -ENOMEM;
 	}
@@ -489,10 +486,7 @@ void transport_free_session(struct se_session *se_sess)
 {
 	if (se_sess->sess_cmd_map) {
 		percpu_ida_destroy(&se_sess->sess_tag_pool);
-		if (is_vmalloc_addr(se_sess->sess_cmd_map))
-			vfree(se_sess->sess_cmd_map);
-		else
-			kfree(se_sess->sess_cmd_map);
+		kvfree(se_sess->sess_cmd_map);
 	}
 	kmem_cache_free(se_sess_cache, se_sess);
 }
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index a19c31d3f369..4d4a0df8344f 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -242,7 +242,7 @@ static struct kmem_cache *adfs_inode_cachep;
 static struct inode *adfs_alloc_inode(struct super_block *sb)
 {
 	struct adfs_inode_info *ei;
-	ei = (struct adfs_inode_info *)kmem_cache_alloc(adfs_inode_cachep, GFP_KERNEL);
+	ei = kmem_cache_alloc(adfs_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	return &ei->vfs_inode;
diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
index a8f463c028ce..5fa92bc790ef 100644
--- a/fs/affs/amigaffs.c
+++ b/fs/affs/amigaffs.c
@@ -140,7 +140,7 @@ affs_remove_link(struct dentry *dentry)
 {
 	struct inode *dir, *inode = d_inode(dentry);
 	struct super_block *sb = inode->i_sb;
-	struct buffer_head *bh = NULL, *link_bh = NULL;
+	struct buffer_head *bh, *link_bh = NULL;
 	u32 link_ino, ino;
 	int retval;
diff --git a/fs/affs/inode.c b/fs/affs/inode.c
index a022f4accd76..17349500592d 100644
--- a/fs/affs/inode.c
+++ b/fs/affs/inode.c
@@ -346,7 +346,7 @@ affs_add_entry(struct inode *dir, struct inode *inode, struct dentry *dentry, s3
 {
 	struct super_block *sb = dir->i_sb;
 	struct buffer_head *inode_bh = NULL;
-	struct buffer_head *bh = NULL;
+	struct buffer_head *bh;
 	u32 block = 0;
 	int retval;
diff --git a/fs/affs/symlink.c b/fs/affs/symlink.c
index f39b71c3981e..ea5b69a18ba9 100644
--- a/fs/affs/symlink.c
+++ b/fs/affs/symlink.c
@@ -16,14 +16,12 @@ static int affs_symlink_readpage(struct file *file, struct page *page)
 	struct inode *inode = page->mapping->host;
 	char *link = kmap(page);
 	struct slink_front *lf;
-	int err;
 	int i, j;
 	char c;
 	char lc;

 	pr_debug("follow_link(ino=%lu)\n", inode->i_ino);

-	err = -EIO;
 	bh = affs_bread(inode->i_sb, inode->i_ino);
 	if (!bh)
 		goto fail;
@@ -66,7 +64,7 @@ fail:
 	SetPageError(page);
 	kunmap(page);
 	unlock_page(page);
-	return err;
+	return -EIO;
 }

 const struct address_space_operations affs_symlink_aops = {
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index add566303c68..c35ffdc12bba 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -142,6 +142,8 @@ static inline struct super_block *pts_sb_from_inode(struct inode *inode)
 	if (inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC)
 		return inode->i_sb;
 #endif
+	if (!devpts_mnt)
+		return NULL;
 	return devpts_mnt->mnt_sb;
 }

@@ -525,10 +527,14 @@ static struct file_system_type devpts_fs_type = {
 int devpts_new_index(struct inode *ptmx_inode)
 {
 	struct super_block *sb = pts_sb_from_inode(ptmx_inode);
-	struct pts_fs_info *fsi = DEVPTS_SB(sb);
+	struct pts_fs_info *fsi;
 	int index;
 	int ida_ret;

+	if (!sb)
+		return -ENODEV;
+
+	fsi = DEVPTS_SB(sb);
 retry:
 	if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL))
 		return -ENOMEM;
@@ -584,11 +590,18 @@ struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
 	struct dentry *dentry;
 	struct super_block *sb = pts_sb_from_inode(ptmx_inode);
 	struct inode *inode;
-	struct dentry *root = sb->s_root;
-	struct pts_fs_info *fsi = DEVPTS_SB(sb);
-	struct pts_mount_opts *opts = &fsi->mount_opts;
+	struct dentry *root;
+	struct pts_fs_info *fsi;
+	struct pts_mount_opts *opts;
 	char s[12];

+	if (!sb)
+		return ERR_PTR(-ENODEV);
+
+	root = sb->s_root;
+	fsi = DEVPTS_SB(sb);
+	opts = &fsi->mount_opts;
+
 	inode = new_inode(sb);
 	if (!inode)
 		return ERR_PTR(-ENOMEM);
@@ -676,12 +689,16 @@ static int __init init_devpts_fs(void)
 	struct ctl_table_header *table;

 	if (!err) {
+		struct vfsmount *mnt;
+
 		table = register_sysctl_table(pty_root_table);
-		devpts_mnt = kern_mount(&devpts_fs_type);
-		if (IS_ERR(devpts_mnt)) {
-			err = PTR_ERR(devpts_mnt);
+		mnt = kern_mount(&devpts_fs_type);
+		if (IS_ERR(mnt)) {
+			err = PTR_ERR(mnt);
 			unregister_filesystem(&devpts_fs_type);
 			unregister_sysctl_table(table);
+		} else {
+			devpts_mnt = mnt;
 		}
 	}
 	return err;
diff --git a/fs/mount.h b/fs/mount.h
index b5b8082bfa42..14db05d424f7 100644
--- a/fs/mount.h
+++ b/fs/mount.h
@@ -118,7 +118,6 @@ static inline void unlock_mount_hash(void)
 }

 struct proc_mounts {
-	struct seq_file m;
 	struct mnt_namespace *ns;
 	struct path root;
 	int (*show)(struct seq_file *, struct vfsmount *);
@@ -127,8 +126,6 @@ struct proc_mounts {
 	loff_t cached_index;
 };

-#define proc_mounts(p) (container_of((p), struct proc_mounts, m))
-
 extern const struct seq_operations mounts_op;

 extern bool __is_local_mountpoint(struct dentry *dentry);
diff --git a/fs/namespace.c b/fs/namespace.c
index 9c1c43d0d4f1..e99f1f4e00cd 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1226,7 +1226,7 @@ EXPORT_SYMBOL(replace_mount_options);
 /* iterator; we want it to have access to namespace_sem, thus here... */
 static void *m_start(struct seq_file *m, loff_t *pos)
 {
-	struct proc_mounts *p = proc_mounts(m);
+	struct proc_mounts *p = m->private;

 	down_read(&namespace_sem);
 	if (p->cached_event == p->ns->event) {
@@ -1247,7 +1247,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)

 static void *m_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	struct proc_mounts *p = proc_mounts(m);
+	struct proc_mounts *p = m->private;

 	p->cached_mount = seq_list_next(v, &p->ns->list, pos);
 	p->cached_index = *pos;
@@ -1261,7 +1261,7 @@ static void m_stop(struct seq_file *m, void *v)

 static int m_show(struct seq_file *m, void *v)
 {
-	struct proc_mounts *p = proc_mounts(m);
+	struct proc_mounts *p = m->private;
 	struct mount *r = list_entry(v, struct mount, mnt_list);
 	return p->show(m, &r->mnt);
 }
diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c
index 8db932da4009..8ebd9a334085 100644
--- a/fs/proc_namespace.c
+++ b/fs/proc_namespace.c
@@ -17,7 +17,8 @@

 static unsigned mounts_poll(struct file *file, poll_table *wait)
 {
-	struct proc_mounts *p = proc_mounts(file->private_data);
+	struct seq_file *m = file->private_data;
+	struct proc_mounts *p = m->private;
 	struct mnt_namespace *ns = p->ns;
 	unsigned res = POLLIN | POLLRDNORM;
 	int event;
@@ -25,8 +26,8 @@ static unsigned mounts_poll(struct file *file, poll_table *wait)
 	poll_wait(file, &p->ns->poll, wait);

 	event = ACCESS_ONCE(ns->event);
-	if (p->m.poll_event != event) {
-		p->m.poll_event = event;
+	if (m->poll_event != event) {
+		m->poll_event = event;
 		res |= POLLERR | POLLPRI;
 	}

@@ -92,7 +93,7 @@ static void show_type(struct seq_file *m, struct super_block *sb)

 static int show_vfsmnt(struct seq_file *m, struct vfsmount *mnt)
 {
-	struct proc_mounts *p = proc_mounts(m);
+	struct proc_mounts *p = m->private;
 	struct mount *r = real_mount(mnt);
 	int err = 0;
 	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
@@ -126,7 +127,7 @@ out:

 static int show_mountinfo(struct seq_file *m, struct vfsmount *mnt)
 {
-	struct proc_mounts *p = proc_mounts(m);
+	struct proc_mounts *p = m->private;
 	struct mount *r = real_mount(mnt);
 	struct super_block *sb = mnt->mnt_sb;
 	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
@@ -186,7 +187,7 @@ out:

 static int show_vfsstat(struct seq_file *m, struct vfsmount *mnt)
 {
-	struct proc_mounts *p = proc_mounts(m);
+	struct proc_mounts *p = m->private;
 	struct mount *r = real_mount(mnt);
 	struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
 	struct super_block *sb = mnt_path.dentry->d_sb;
@@ -236,6 +237,7 @@ static int mounts_open_common(struct inode *inode, struct file *file,
 	struct mnt_namespace *ns = NULL;
 	struct path root;
 	struct proc_mounts *p;
+	struct seq_file *m;
 	int ret = -EINVAL;

 	if (!task)
@@ -260,26 +262,21 @@ static int mounts_open_common(struct inode *inode, struct file *file,
 	task_unlock(task);
 	put_task_struct(task);

-	ret = -ENOMEM;
-	p = kmalloc(sizeof(struct proc_mounts), GFP_KERNEL);
-	if (!p)
+	ret = seq_open_private(file, &mounts_op, sizeof(struct proc_mounts));
+	if (ret)
 		goto err_put_path;

-	file->private_data = &p->m;
-	ret = seq_open(file, &mounts_op);
-	if (ret)
-		goto err_free;
+	m = file->private_data;
+	m->poll_event = ns->event;

+	p = m->private;
 	p->ns = ns;
 	p->root = root;
-	p->m.poll_event = ns->event;
 	p->show = show;
 	p->cached_event = ~0ULL;

 	return 0;

- err_free:
-	kfree(p);
  err_put_path:
 	path_put(&root);
  err_put_ns:
@@ -290,10 +287,11 @@ static int mounts_open_common(struct inode *inode, struct file *file,

 static int mounts_release(struct inode *inode, struct file *file)
 {
-	struct proc_mounts *p = proc_mounts(file->private_data);
+	struct seq_file *m = file->private_data;
+	struct proc_mounts *p = m->private;
 	path_put(&p->root);
 	put_mnt_ns(p->ns);
-	return seq_release(inode, file);
+	return seq_release_private(inode, file);
 }

 static int mounts_open(struct inode *inode, struct file *file)
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 52b492721603..1d9c1cbd4d0b 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -48,18 +48,21 @@ static void *seq_buf_alloc(unsigned long size)
  *	ERR_PTR(error).  In the end of sequence they return %NULL. ->show()
  *	returns 0 in case of success and negative number in case of error.
  *	Returning SEQ_SKIP means "discard this element and move on".
+ *	Note: seq_open() will allocate a struct seq_file and store its
+ *	pointer in @file->private_data. This pointer should not be modified.
  */
 int seq_open(struct file *file, const struct seq_operations *op)
 {
-	struct seq_file *p = file->private_data;
+	struct seq_file *p;
+
+	WARN_ON(file->private_data);
+
+	p = kzalloc(sizeof(*p), GFP_KERNEL);
+	if (!p)
+		return -ENOMEM;
+
+	file->private_data = p;

-	if (!p) {
-		p = kmalloc(sizeof(*p), GFP_KERNEL);
-		if (!p)
-			return -ENOMEM;
-		file->private_data = p;
-	}
-	memset(p, 0, sizeof(*p));
 	mutex_init(&p->lock);
 	p->op = op;
 #ifdef CONFIG_USER_NS
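With `struct seq_file` no longer embedded in `struct proc_mounts`, `file->private_data` now points at the `seq_file` itself and the private record hangs off `m->private`; `seq_open_private()` allocates both in one call. A sketch of the resulting open/release pattern (the iterator state and seq ops here are assumed, not from this commit):

```c
#include <linux/fs.h>
#include <linux/seq_file.h>

/* Hypothetical private state, analogous to struct proc_mounts. */
struct example_iter_state {
	int cursor;
};

extern const struct seq_operations example_seq_ops;	/* defined elsewhere */

static int example_open(struct inode *inode, struct file *file)
{
	struct seq_file *m;
	struct example_iter_state *st;
	int ret;

	/*
	 * Allocates the seq_file plus a zeroed private area of the given
	 * size, wiring file->private_data -> seq_file -> private.
	 */
	ret = seq_open_private(file, &example_seq_ops,
			       sizeof(struct example_iter_state));
	if (ret)
		return ret;

	/* Reach the state the same way the converted code does. */
	m = file->private_data;
	st = m->private;
	st->cursor = 0;
	return 0;
}

static int example_release(struct inode *inode, struct file *file)
{
	return seq_release_private(inode, file);	/* frees m->private too */
}
```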
diff --git a/include/drm/drm_mem_util.h b/include/drm/drm_mem_util.h
index 19a240446fca..e42495ad8136 100644
--- a/include/drm/drm_mem_util.h
+++ b/include/drm/drm_mem_util.h
@@ -56,10 +56,7 @@ static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size)

 static __inline void drm_free_large(void *ptr)
 {
-	if (!is_vmalloc_addr(ptr))
-		return kfree(ptr);
-
-	vfree(ptr);
+	kvfree(ptr);
 }

 #endif
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 1ccaab44abcc..5383bb1394a1 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -119,16 +119,16 @@ extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,

 extern struct gen_pool *devm_gen_pool_create(struct device *dev,
 		int min_alloc_order, int nid);
-extern struct gen_pool *dev_get_gen_pool(struct device *dev);
+extern struct gen_pool *gen_pool_get(struct device *dev);

 bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
 			size_t size);

 #ifdef CONFIG_OF
-extern struct gen_pool *of_get_named_gen_pool(struct device_node *np,
+extern struct gen_pool *of_gen_pool_get(struct device_node *np,
 	const char *propname, int index);
 #else
-static inline struct gen_pool *of_get_named_gen_pool(struct device_node *np,
+static inline struct gen_pool *of_gen_pool_get(struct device_node *np,
 	const char *propname, int index)
 {
 	return NULL;
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 6ba7cf23748f..ad35f300b9a4 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -384,6 +384,14 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
 void drain_all_pages(struct zone *zone);
 void drain_local_pages(struct zone *zone);

+#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+void page_alloc_init_late(void);
+#else
+static inline void page_alloc_init_late(void)
+{
+}
+#endif
+
 /*
  * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what
  * GFP flags are used before interrupts are enabled. Once interrupts are
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index cfa9351c7536..5f0be58640ea 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -439,6 +439,9 @@ extern int panic_on_unrecovered_nmi;
 extern int panic_on_io_nmi;
 extern int panic_on_warn;
 extern int sysctl_panic_on_stackoverflow;
+
+extern bool crash_kexec_post_notifiers;
+
 /*
  * Only to be used by arch init code. If the user over-wrote the default
  * CONFIG_PANIC_TIMEOUT, honor it.
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 0215ffd63069..cc4b01972060 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -101,6 +101,9 @@ void __next_mem_range_rev(u64 *idx, int nid, ulong flags,
 			  struct memblock_type *type_b, phys_addr_t *out_start,
 			  phys_addr_t *out_end, int *out_nid);

+void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
+				phys_addr_t *out_end);
+
 /**
  * for_each_mem_range - iterate through memblock areas from type_a and not
  * included in type_b. Or just type_a if type_b is NULL.
@@ -142,6 +145,21 @@ void __next_mem_range_rev(u64 *idx, int nid, ulong flags,
 	     __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
 				  p_start, p_end, p_nid))

+/**
+ * for_each_reserved_mem_region - iterate over all reserved memblock areas
+ * @i: u64 used as loop variable
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ *
+ * Walks over reserved areas of memblock. Available as soon as memblock
+ * is initialized.
+ */
+#define for_each_reserved_mem_region(i, p_start, p_end)			\
+	for (i = 0UL,							\
+	     __next_reserved_mem_region(&i, p_start, p_end);		\
+	     i != (u64)ULLONG_MAX;					\
+	     __next_reserved_mem_region(&i, p_start, p_end))
+
 #ifdef CONFIG_MOVABLE_NODE
 static inline bool memblock_is_hotpluggable(struct memblock_region *m)
 {
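A sketch of how the new `for_each_reserved_mem_region()` iterator is consumed, mirroring the `mm/nobootmem.c` hunk later in this diff (the walker function itself is illustrative):

```c
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/printk.h>

/*
 * Walk every reserved memblock region. This is the loop shape the new
 * macro supports: the u64 index is advanced by
 * __next_reserved_mem_region() until it signals ULLONG_MAX.
 */
static void __init example_walk_reserved(void)
{
	phys_addr_t start, end;
	u64 i;

	for_each_reserved_mem_region(i, &start, &end)
		pr_info("reserved: %pa..%pa\n", &start, &end);
}
```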
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 99959a34f4f1..2e872f92dbac 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1635,6 +1635,8 @@ extern void free_highmem_page(struct page *page);
 extern void adjust_managed_page_count(struct page *page, long count);
 extern void mem_init_print_info(const char *str);

+extern void reserve_bootmem_region(unsigned long start, unsigned long end);
+
 /* Free the reserved page into the buddy system, so it gets managed. */
 static inline void __free_reserved_page(struct page *page)
 {
@@ -1724,7 +1726,8 @@ extern void sparse_memory_present_with_active_regions(int nid);

 #if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
     !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
-static inline int __early_pfn_to_nid(unsigned long pfn)
+static inline int __early_pfn_to_nid(unsigned long pfn,
+					struct mminit_pfnnid_cache *state)
 {
 	return 0;
 }
@@ -1732,7 +1735,8 @@ static inline int __early_pfn_to_nid(unsigned long pfn)
 /* please see mm/page_alloc.c */
 extern int __meminit early_pfn_to_nid(unsigned long pfn);
 /* there is a per-arch backend function. */
-extern int __meminit __early_pfn_to_nid(unsigned long pfn);
+extern int __meminit __early_pfn_to_nid(unsigned long pfn,
+					struct mminit_pfnnid_cache *state);
 #endif

 extern void set_dma_reserve(unsigned long new_dma_reserve);
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 54d74f6eb233..754c25966a0a 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -762,6 +762,14 @@ typedef struct pglist_data {
 	/* Number of pages migrated during the rate limiting time interval */
 	unsigned long numabalancing_migrate_nr_pages;
 #endif
+
+#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+	/*
+	 * If memory initialisation on large machines is deferred then this
+	 * is the first PFN that needs to be initialised.
+	 */
+	unsigned long first_deferred_pfn;
+#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
 } pg_data_t;

 #define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
@@ -1216,11 +1224,16 @@ void sparse_init(void);
 #define sparse_index_init(_sec, _nid)  do {} while (0)
 #endif /* CONFIG_SPARSEMEM */

-#ifdef CONFIG_NODES_SPAN_OTHER_NODES
-bool early_pfn_in_nid(unsigned long pfn, int nid);
-#else
-#define early_pfn_in_nid(pfn, nid)	(1)
-#endif
+/*
+ * During memory init memblocks map pfns to nids. The search is expensive and
+ * this caches recent lookups. The implementation of __early_pfn_to_nid
+ * may treat start/end as pfns or sections.
+ */
+struct mminit_pfnnid_cache {
+	unsigned long last_start;
+	unsigned long last_end;
+	int last_nid;
+};

 #ifndef early_pfn_valid
 #define early_pfn_valid(pfn)	(1)
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 50a8486c524b..9b1ef0c820a7 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -265,13 +265,16 @@ int sg_alloc_table_from_pages(struct sg_table *sgt,
 	unsigned long offset, unsigned long size,
 	gfp_t gfp_mask);

+size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
+		      size_t buflen, off_t skip, bool to_buffer);
+
 size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
-			   void *buf, size_t buflen);
+			   const void *buf, size_t buflen);
 size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
 			 void *buf, size_t buflen);

 size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
-			    void *buf, size_t buflen, off_t skip);
+			    const void *buf, size_t buflen, off_t skip);
 size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
 			  void *buf, size_t buflen, off_t skip);
diff --git a/init/main.c b/init/main.c
index c599aea23bb1..c5d5626289ce 100644
--- a/init/main.c
+++ b/init/main.c
@@ -1004,6 +1004,8 @@ static noinline void __init kernel_init_freeable(void)
 	smp_init();
 	sched_init_smp();

+	page_alloc_init_late();
+
 	do_basic_setup();

 	/* Open the /dev/console on the rootfs, this should never fail */
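The `mminit_pfnnid_cache` introduced above replaces the SMP-unsafe statics removed from the ia64 `__early_pfn_to_nid()` earlier in this diff: each walker now owns its cache, so parallel deferred-initialisation threads cannot trample shared state. A generic sketch of a caller threading the cache through (this is not the exact `mm/page_alloc.c` code, and the node-0 fallback is an assumption made for the example):

```c
#include <linux/mm.h>
#include <linux/mmzone.h>

/*
 * Each boot-time walker keeps its own cache on the stack, so two
 * threads initialising different PFN ranges never share lookup state.
 */
static int __meminit example_walk_range(unsigned long start_pfn,
					unsigned long end_pfn)
{
	struct mminit_pfnnid_cache cache = { };	/* per-caller, not a static */
	unsigned long pfn;
	int nid;

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		nid = __early_pfn_to_nid(pfn, &cache);
		if (nid < 0)
			nid = 0;	/* assumption: treat unmapped as node 0 */
	}
	return 0;
}
```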
diff --git a/ipc/msg.c b/ipc/msg.c
index 2b6fdbb9e0e9..66c4f567eb73 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -76,7 +76,7 @@ struct msg_sender {
 static inline struct msg_queue *msq_obtain_object(struct ipc_namespace *ns, int id)
 {
-	struct kern_ipc_perm *ipcp = ipc_obtain_object(&msg_ids(ns), id);
+	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&msg_ids(ns), id);

 	if (IS_ERR(ipcp))
 		return ERR_CAST(ipcp);
@@ -196,7 +196,7 @@ static void expunge_all(struct msg_queue *msq, int res)
 		 * or dealing with -EAGAIN cases. See lockless receive part 1
 		 * and 2 in do_msgrcv().
 		 */
-		smp_mb();
+		smp_wmb(); /* barrier (B) */
 		msr->r_msg = ERR_PTR(res);
 	}
 }
@@ -580,7 +580,8 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
 			/* initialize pipelined send ordering */
 			msr->r_msg = NULL;
 			wake_up_process(msr->r_tsk);
-			smp_mb(); /* see barrier comment below */
+			/* barrier (B) see barrier comment below */
+			smp_wmb();
 			msr->r_msg = ERR_PTR(-E2BIG);
 		} else {
 			msr->r_msg = NULL;
@@ -589,11 +590,12 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
 			wake_up_process(msr->r_tsk);
 			/*
 			 * Ensure that the wakeup is visible before
-			 * setting r_msg, as the receiving end depends
-			 * on it. See lockless receive part 1 and 2 in
-			 * do_msgrcv().
+			 * setting r_msg, as the receiving can otherwise
+			 * exit - once r_msg is set, the receiver can
+			 * continue. See lockless receive part 1 and 2
+			 * in do_msgrcv(). Barrier (B).
 			 */
-			smp_mb();
+			smp_wmb();
 			msr->r_msg = msg;

 			return 1;
@@ -932,12 +934,38 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl
 		/* Lockless receive, part 2:
 		 * Wait until pipelined_send or expunge_all are outside of
 		 * wake_up_process(). There is a race with exit(), see
-		 * ipc/mqueue.c for the details.
+		 * ipc/mqueue.c for the details. The correct serialization
+		 * ensures that a receiver cannot continue without the wakeup
+		 * being visibible _before_ setting r_msg:
+		 *
+		 * CPU 0                             CPU 1
+		 * <loop receiver>
+		 *   smp_rmb(); (A) <-- pair -.      <waker thread>
+		 *   <load ->r_msg>           |        msr->r_msg = NULL;
+		 *                            |        wake_up_process();
+		 * <continue>                 `------> smp_wmb(); (B)
+		 *                                     msr->r_msg = msg;
+		 *
+		 * Where (A) orders the message value read and where (B) orders
+		 * the write to the r_msg -- done in both pipelined_send and
+		 * expunge_all.
 		 */
-		msg = (struct msg_msg *)msr_d.r_msg;
-		while (msg == NULL) {
-			cpu_relax();
+		for (;;) {
+			/*
+			 * Pairs with writer barrier in pipelined_send
+			 * or expunge_all.
+			 */
+			smp_rmb(); /* barrier (A) */
 			msg = (struct msg_msg *)msr_d.r_msg;
+			if (msg)
+				break;
+
+			/*
+			 * The cpu_relax() call is a compiler barrier
+			 * which forces everything in this loop to be
+			 * re-loaded.
+			 */
+			cpu_relax();
 		}

 		/* Lockless receive, part 3:
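The ipc/msg.c change pairs a write barrier in the waker with a read barrier in the spinning receiver, replacing the heavier `smp_mb()`. A sketch of the pairing in isolation (the structure and helpers are simplified stand-ins for the real `msg_receiver` code, and the `READ_ONCE`/`WRITE_ONCE` annotations are an addition of this sketch):

```c
#include <linux/compiler.h>
#include <asm/barrier.h>
#include <asm/processor.h>

struct example_receiver {
	void *r_msg;
};

/*
 * Waker: publish "msg" only after the wakeup side effects, so a
 * receiver that observes the pointer also observes the wakeup.
 */
static void example_waker(struct example_receiver *msr, void *msg)
{
	/* ... wake_up_process(receiver task) would happen here ... */
	smp_wmb();		/* barrier (B): order wakeup before r_msg */
	WRITE_ONCE(msr->r_msg, msg);
}

/*
 * Receiver: re-read r_msg after a read barrier so the new pointer is
 * never seen together with stale earlier state.
 */
static void *example_receiver_spin(struct example_receiver *msr)
{
	void *msg;

	for (;;) {
		smp_rmb();	/* barrier (A): pairs with (B) above */
		msg = READ_ONCE(msr->r_msg);
		if (msg)
			return msg;
		cpu_relax();
	}
}
```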
diff --git a/ipc/sem.c b/ipc/sem.c
index d1a6edd17eba..bc3d530cb23e 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -391,7 +391,7 @@ static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
 	struct kern_ipc_perm *ipcp;
 	struct sem_array *sma;

-	ipcp = ipc_obtain_object(&sem_ids(ns), id);
+	ipcp = ipc_obtain_object_idr(&sem_ids(ns), id);
 	if (IS_ERR(ipcp))
 		return ERR_CAST(ipcp);

@@ -410,7 +410,7 @@ static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
 static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
 {
-	struct kern_ipc_perm *ipcp = ipc_obtain_object(&sem_ids(ns), id);
+	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&sem_ids(ns), id);

 	if (IS_ERR(ipcp))
 		return ERR_CAST(ipcp);
diff --git a/ipc/shm.c b/ipc/shm.c
index 6d767071c367..06e5cf2fe019 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -129,7 +129,7 @@ void __init shm_init(void)
 static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
 {
-	struct kern_ipc_perm *ipcp = ipc_obtain_object(&shm_ids(ns), id);
+	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);

 	if (IS_ERR(ipcp))
 		return ERR_CAST(ipcp);
@@ -155,8 +155,11 @@ static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
 {
 	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

-	if (IS_ERR(ipcp))
-		return (struct shmid_kernel *)ipcp;
+	/*
+	 * We raced in the idr lookup or with shm_destroy(). Either way, the
+	 * ID is busted.
+	 */
+	BUG_ON(IS_ERR(ipcp));

 	return container_of(ipcp, struct shmid_kernel, shm_perm);
 }
@@ -191,7 +194,6 @@ static void shm_open(struct vm_area_struct *vma)
 	struct shmid_kernel *shp;

 	shp = shm_lock(sfd->ns, sfd->id);
-	BUG_ON(IS_ERR(shp));
 	shp->shm_atim = get_seconds();
 	shp->shm_lprid = task_tgid_vnr(current);
 	shp->shm_nattch++;
@@ -258,7 +260,6 @@ static void shm_close(struct vm_area_struct *vma)
 	down_write(&shm_ids(ns).rwsem);
 	/* remove from the list of attaches of the shm segment */
 	shp = shm_lock(ns, sfd->id);
-	BUG_ON(IS_ERR(shp));
 	shp->shm_lprid = task_tgid_vnr(current);
 	shp->shm_dtim = get_seconds();
 	shp->shm_nattch--;
@@ -1191,7 +1192,6 @@ out_fput:
 out_nattch:
 	down_write(&shm_ids(ns).rwsem);
 	shp = shm_lock(ns, shmid);
-	BUG_ON(IS_ERR(shp));
 	shp->shm_nattch--;
 	if (shm_may_destroy(ns, shp))
 		shm_destroy(ns, shp);
diff --git a/ipc/util.c b/ipc/util.c
index ff3323ef8d8b..be4230020a1f 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -467,10 +467,7 @@ void ipc_rcu_free(struct rcu_head *head)
 {
 	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);

-	if (is_vmalloc_addr(p))
-		vfree(p);
-	else
-		kfree(p);
+	kvfree(p);
 }

 /**
@@ -558,7 +555,7 @@ void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out)
  * Call inside the RCU critical section.
  * The ipc object is *not* locked on exit.
  */
-struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id)
+struct kern_ipc_perm *ipc_obtain_object_idr(struct ipc_ids *ids, int id)
 {
 	struct kern_ipc_perm *out;
 	int lid = ipcid_to_idx(id);
@@ -584,21 +581,24 @@ struct kern_ipc_perm *ipc_lock(struct ipc_ids *ids, int id)
 	struct kern_ipc_perm *out;

 	rcu_read_lock();
-	out = ipc_obtain_object(ids, id);
+	out = ipc_obtain_object_idr(ids, id);
 	if (IS_ERR(out))
-		goto err1;
+		goto err;

 	spin_lock(&out->lock);

-	/* ipc_rmid() may have already freed the ID while ipc_lock
-	 * was spinning: here verify that the structure is still valid
+	/*
+	 * ipc_rmid() may have already freed the ID while ipc_lock()
+	 * was spinning: here verify that the structure is still valid.
+	 * Upon races with RMID, return -EIDRM, thus indicating that
+	 * the ID points to a removed identifier.
 	 */
 	if (ipc_valid_object(out))
 		return out;

 	spin_unlock(&out->lock);
-	out = ERR_PTR(-EINVAL);
-err1:
+	out = ERR_PTR(-EIDRM);
+err:
 	rcu_read_unlock();
 	return out;
 }
@@ -608,7 +608,7 @@ err1:
  * @ids: ipc identifier set
  * @id: ipc id to look for
  *
- * Similar to ipc_obtain_object() but also checks
+ * Similar to ipc_obtain_object_idr() but also checks
  * the ipc object reference counter.
  *
  * Call inside the RCU critical section.
@@ -616,13 +616,13 @@ err1:
 struct kern_ipc_perm *ipc_obtain_object_check(struct ipc_ids *ids, int id)
 {
-	struct kern_ipc_perm *out = ipc_obtain_object(ids, id);
+	struct kern_ipc_perm *out = ipc_obtain_object_idr(ids, id);

 	if (IS_ERR(out))
 		goto out;

 	if (ipc_checkid(out, id))
-		return ERR_PTR(-EIDRM);
+		return ERR_PTR(-EINVAL);
 out:
 	return out;
 }
diff --git a/ipc/util.h b/ipc/util.h
index 1a5a0fcd099c..3a8a5a0eca62 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -132,7 +132,7 @@ void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head));
 void ipc_rcu_free(struct rcu_head *head);

 struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int);
-struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id);
+struct kern_ipc_perm *ipc_obtain_object_idr(struct ipc_ids *ids, int id);

 void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out);
 void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out);
diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
index a744098e4eb7..7080ae1eb6c1 100644
--- a/kernel/gcov/base.c
+++ b/kernel/gcov/base.c
@@ -92,6 +92,12 @@ void __gcov_merge_time_profile(gcov_type *counters, unsigned int n_counters)
 }
 EXPORT_SYMBOL(__gcov_merge_time_profile);

+void __gcov_merge_icall_topn(gcov_type *counters, unsigned int n_counters)
+{
+	/* Unused. */
+}
+EXPORT_SYMBOL(__gcov_merge_icall_topn);
+
 /**
  * gcov_enable_events - enable event reporting through gcov_event()
  *
diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c
index 826ba9fb5e32..e25e92fb44fa 100644
--- a/kernel/gcov/gcc_4_7.c
+++ b/kernel/gcov/gcc_4_7.c
@@ -18,7 +18,9 @@
 #include <linux/vmalloc.h>
 #include "gcov.h"

-#if __GNUC__ == 4 && __GNUC_MINOR__ >= 9
+#if __GNUC__ == 5 && __GNUC_MINOR__ >= 1
+#define GCOV_COUNTERS			10
+#elif __GNUC__ == 4 && __GNUC_MINOR__ >= 9
 #define GCOV_COUNTERS			9
 #else
 #define GCOV_COUNTERS			8
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 7a36fdcca5bf..a785c1015e25 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -84,6 +84,17 @@ struct resource crashk_low_res = {

 int kexec_should_crash(struct task_struct *p)
 {
+	/*
+	 * If crash_kexec_post_notifiers is enabled, don't run
+	 * crash_kexec() here yet, which must be run after panic
+	 * notifiers in panic().
+	 */
+	if (crash_kexec_post_notifiers)
+		return 0;
+	/*
+	 * There are 4 panic() calls in do_exit() path, each of which
+	 * corresponds to each of these 4 conditions.
+	 */
 	if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
 		return 1;
 	return 0;
diff --git a/kernel/panic.c b/kernel/panic.c
index 8136ad76e5fd..04e91ff7560b 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -32,7 +32,7 @@ static unsigned long tainted_mask;
 static int pause_on_oops;
 static int pause_on_oops_flag;
 static DEFINE_SPINLOCK(pause_on_oops_lock);
-static bool crash_kexec_post_notifiers;
+bool crash_kexec_post_notifiers;
 int panic_on_warn __read_mostly;

 int panic_timeout = CONFIG_PANIC_TIMEOUT;
@@ -142,7 +142,8 @@ void panic(const char *fmt, ...)
 	 * Note: since some panic_notifiers can make crashed kernel
 	 * more unstable, it can increase risks of the kdump failure too.
 	 */
-	crash_kexec(NULL);
+	if (crash_kexec_post_notifiers)
+		crash_kexec(NULL);

 	bust_spinlocks(0);
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index de553849f3ac..cf8c24203368 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -207,14 +207,14 @@ static int console_may_schedule;
  * need to be changed in the future, when the requirements change.
  *
  * /dev/kmsg exports the structured data in the following line format:
- *   "<level>,<sequnum>,<timestamp>,<contflag>;<message text>\n"
+ *   "<level>,<sequnum>,<timestamp>,<contflag>[,additional_values, ... ];<message text>\n"
+ *
+ * Users of the export format should ignore possible additional values
+ * separated by ',', and find the message after the ';' character.
  *
  * The optional key/value pairs are attached as continuation lines starting
  * with a space character and terminated by a newline. All possible
  * non-prinatable characters are escaped in the "\xff" notation.
- *
- * Users of the export format should ignore possible additional values
- * separated by ',', and find the message after the ';' character.
  */

 enum log_flags {
diff --git a/kernel/relay.c b/kernel/relay.c
index e9dbaeb8fd65..0b4570cfacae 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -81,10 +81,7 @@ static struct page **relay_alloc_page_array(unsigned int n_pages)
  */
 static void relay_free_page_array(struct page **array)
 {
-	if (is_vmalloc_addr(array))
-		vfree(array);
-	else
-		kfree(array);
+	kvfree(array);
 }

 /**
diff --git a/lib/genalloc.c b/lib/genalloc.c
index d214866eeea2..daf0afb6d979 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -602,12 +602,12 @@ struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
 EXPORT_SYMBOL(devm_gen_pool_create);

 /**
- * dev_get_gen_pool - Obtain the gen_pool (if any) for a device
+ * gen_pool_get - Obtain the gen_pool (if any) for a device
  * @dev: device to retrieve the gen_pool from
  *
  * Returns the gen_pool for the device if one is present, or NULL.
  */
-struct gen_pool *dev_get_gen_pool(struct device *dev)
+struct gen_pool *gen_pool_get(struct device *dev)
 {
 	struct gen_pool **p = devres_find(dev, devm_gen_pool_release, NULL,
 					  NULL);
@@ -616,11 +616,11 @@ struct gen_pool *dev_get_gen_pool(struct device *dev)
 		return NULL;
 	return *p;
 }
-EXPORT_SYMBOL_GPL(dev_get_gen_pool);
+EXPORT_SYMBOL_GPL(gen_pool_get);

 #ifdef CONFIG_OF
 /**
- * of_get_named_gen_pool - find a pool by phandle property
+ * of_gen_pool_get - find a pool by phandle property
  * @np: device node
  * @propname: property name containing phandle(s)
  * @index: index into the phandle array
@@ -629,7 +629,7 @@ EXPORT_SYMBOL_GPL(dev_get_gen_pool);
  * address of the device tree node pointed at by the phandle property,
  * or NULL if not found.
  */
-struct gen_pool *of_get_named_gen_pool(struct device_node *np,
+struct gen_pool *of_gen_pool_get(struct device_node *np,
 	const char *propname, int index)
 {
 	struct platform_device *pdev;
@@ -642,7 +642,7 @@ struct gen_pool *of_get_named_gen_pool(struct device_node *np,
 	of_node_put(np_pool);
 	if (!pdev)
 		return NULL;
-	return dev_get_gen_pool(&pdev->dev);
+	return gen_pool_get(&pdev->dev);
 }
-EXPORT_SYMBOL_GPL(of_get_named_gen_pool);
+EXPORT_SYMBOL_GPL(of_gen_pool_get);
 #endif /* CONFIG_OF */
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 99fbc2f238c4..d105a9f56878 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -650,9 +650,8 @@ EXPORT_SYMBOL(sg_miter_stop);
  * Returns the number of copied bytes.
  *
  **/
-static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
-			     void *buf, size_t buflen, off_t skip,
-			     bool to_buffer)
+size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
+		      size_t buflen, off_t skip, bool to_buffer)
 {
 	unsigned int offset = 0;
 	struct sg_mapping_iter miter;
@@ -689,6 +688,7 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
 	local_irq_restore(flags);
 	return offset;
 }
+EXPORT_SYMBOL(sg_copy_buffer);

 /**
  * sg_copy_from_buffer - Copy from a linear buffer to an SG list
@@ -701,9 +701,9 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
  *
  **/
 size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
-			   void *buf, size_t buflen)
+			   const void *buf, size_t buflen)
 {
-	return sg_copy_buffer(sgl, nents, buf, buflen, 0, false);
+	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false);
 }
 EXPORT_SYMBOL(sg_copy_from_buffer);

@@ -729,16 +729,16 @@ EXPORT_SYMBOL(sg_copy_to_buffer);
  * @sgl:		 The SG list
  * @nents:		 Number of SG entries
  * @buf:		 Where to copy from
- * @skip:		 Number of bytes to skip before copying
  * @buflen:		 The number of bytes to copy
+ * @skip:		 Number of bytes to skip before copying
  *
  * Returns the number of copied bytes.
  *
  **/
 size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
-			    void *buf, size_t buflen, off_t skip)
+			    const void *buf, size_t buflen, off_t skip)
 {
-	return sg_copy_buffer(sgl, nents, buf, buflen, skip, false);
+	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false);
 }
 EXPORT_SYMBOL(sg_pcopy_from_buffer);

@@ -747,8 +747,8 @@ EXPORT_SYMBOL(sg_pcopy_from_buffer);
  * @sgl:		 The SG list
  * @nents:		 Number of SG entries
  * @buf:		 Where to copy to
- * @skip:		 Number of bytes to skip before copying
  * @buflen:		 The number of bytes to copy
+ * @skip:		 Number of bytes to skip before copying
  *
  * Returns the number of copied bytes.
  *
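Exporting `sg_copy_buffer()` lets callers such as `scsi_debug` pick the copy direction at runtime via the `to_buffer` flag instead of stashing a function pointer. A small sketch of that usage (the wrapper name is invented):

```c
#include <linux/scatterlist.h>

/*
 * Direction chosen by a flag instead of a stored function pointer,
 * matching the scsi_debug conversion above. A "write" to the backing
 * store reads from the scatterlist, so to_buffer == do_write here.
 */
static size_t example_xfer(struct scatterlist *sgl, unsigned int nents,
			   void *store, size_t len, off_t skip, bool do_write)
{
	return sg_copy_buffer(sgl, nents, store, len, skip, do_write);
}
```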
* diff --git a/mm/Kconfig b/mm/Kconfig index c180af880ed5..e79de2bd12cd 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -636,3 +636,21 @@ config MAX_STACK_SIZE_MB changed to a smaller value in which case that is used. A sane initial value is 80 MB. + +# For architectures that support deferred memory initialisation +config ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT + bool + +config DEFERRED_STRUCT_PAGE_INIT + bool "Defer initialisation of struct pages to kswapd" + default n + depends on ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT + depends on MEMORY_HOTPLUG + help + Ordinarily all struct pages are initialised during early boot in a + single thread. On very large machines this can take a considerable + amount of time. If this option is set, large machines will bring up + a subset of memmap at boot and then initialise the rest in parallel + when kswapd starts. This has a potential performance impact on + processes running early in the lifetime of the system until kswapd + finishes the initialisation. diff --git a/mm/bootmem.c b/mm/bootmem.c index 477be696511d..a23dd1934654 100644 --- a/mm/bootmem.c +++ b/mm/bootmem.c @@ -164,7 +164,7 @@ void __init free_bootmem_late(unsigned long physaddr, unsigned long size) end = PFN_DOWN(physaddr + size); for (; cursor < end; cursor++) { - __free_pages_bootmem(pfn_to_page(cursor), 0); + __free_pages_bootmem(pfn_to_page(cursor), cursor, 0); totalram_pages++; } } @@ -172,7 +172,7 @@ void __init free_bootmem_late(unsigned long physaddr, unsigned long size) static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata) { struct page *page; - unsigned long *map, start, end, pages, count = 0; + unsigned long *map, start, end, pages, cur, count = 0; if (!bdata->node_bootmem_map) return 0; @@ -210,17 +210,17 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata) if (IS_ALIGNED(start, BITS_PER_LONG) && vec == ~0UL) { int order = ilog2(BITS_PER_LONG); - __free_pages_bootmem(pfn_to_page(start), order); + __free_pages_bootmem(pfn_to_page(start), start, order); count += BITS_PER_LONG; start += BITS_PER_LONG; } else { - unsigned long cur = start; + cur = start; start = ALIGN(start + 1, BITS_PER_LONG); while (vec && cur != start) { if (vec & 1) { page = pfn_to_page(cur); - __free_pages_bootmem(page, 0); + __free_pages_bootmem(page, cur, 0); count++; } vec >>= 1; @@ -229,12 +229,13 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata) } } + cur = bdata->node_min_pfn; page = virt_to_page(bdata->node_bootmem_map); pages = bdata->node_low_pfn - bdata->node_min_pfn; pages = bootmem_bootmap_pages(pages); count += pages; while (pages--) - __free_pages_bootmem(page++, 0); + __free_pages_bootmem(page++, cur++, 0); bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count); diff --git a/mm/internal.h b/mm/internal.h index a25e359a4039..36b23f1e2ca6 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -155,7 +155,8 @@ __find_buddy_index(unsigned long page_idx, unsigned int order) } extern int __isolate_free_page(struct page *page, unsigned int order); -extern void __free_pages_bootmem(struct page *page, unsigned int order); +extern void __free_pages_bootmem(struct page *page, unsigned long pfn, + unsigned int order); extern void prep_compound_page(struct page *page, unsigned long order); #ifdef CONFIG_MEMORY_FAILURE extern bool is_free_buddy_page(struct page *page); @@ -361,10 +362,7 @@ do { \ } while (0) extern void mminit_verify_pageflags_layout(void); -extern void mminit_verify_page_links(struct page *page, - enum zone_type zone,
unsigned long nid, unsigned long pfn); extern void mminit_verify_zonelist(void); - #else static inline void mminit_dprintk(enum mminit_level level, @@ -376,11 +374,6 @@ static inline void mminit_verify_pageflags_layout(void) { } -static inline void mminit_verify_page_links(struct page *page, - enum zone_type zone, unsigned long nid, unsigned long pfn) -{ -} - static inline void mminit_verify_zonelist(void) { } diff --git a/mm/memblock.c b/mm/memblock.c index 1b444c730846..87108e77e476 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -820,6 +820,38 @@ int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size) /** + * __next_reserved_mem_region - next function for for_each_reserved_mem_region() + * @idx: pointer to u64 loop variable + * @out_start: ptr to phys_addr_t for start address of the region, can be %NULL + * @out_end: ptr to phys_addr_t for end address of the region, can be %NULL + * + * Iterate over all reserved memory regions. + */ +void __init_memblock __next_reserved_mem_region(u64 *idx, + phys_addr_t *out_start, + phys_addr_t *out_end) +{ + struct memblock_type *rsv = &memblock.reserved; + + if (*idx >= 0 && *idx < rsv->cnt) { + struct memblock_region *r = &rsv->regions[*idx]; + phys_addr_t base = r->base; + phys_addr_t size = r->size; + + if (out_start) + *out_start = base; + if (out_end) + *out_end = base + size - 1; + + *idx += 1; + return; + } + + /* signal end of iteration */ + *idx = ULLONG_MAX; +} + +/** * __next_mem_range - next function for for_each_free_mem_range() etc. * @idx: pointer to u64 loop variable * @nid: node selector, %NUMA_NO_NODE for all nodes @@ -1387,7 +1419,7 @@ void __init __memblock_free_late(phys_addr_t base, phys_addr_t size) end = PFN_DOWN(base + size); for (; cursor < end; cursor++) { - __free_pages_bootmem(pfn_to_page(cursor), 0); + __free_pages_bootmem(pfn_to_page(cursor), cursor, 0); totalram_pages++; } } diff --git a/mm/mm_init.c b/mm/mm_init.c index 5f420f7fafa1..fdadf918de76 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -11,6 +11,7 @@ #include <linux/export.h> #include <linux/memory.h> #include <linux/notifier.h> +#include <linux/sched.h> #include "internal.h" #ifdef CONFIG_DEBUG_MEMORY_INIT @@ -130,14 +131,6 @@ void __init mminit_verify_pageflags_layout(void) BUG_ON(or_mask != add_mask); } -void __meminit mminit_verify_page_links(struct page *page, enum zone_type zone, - unsigned long nid, unsigned long pfn) -{ - BUG_ON(page_to_nid(page) != nid); - BUG_ON(page_zonenum(page) != zone); - BUG_ON(page_to_pfn(page) != pfn); -} - static __init int set_mminit_loglevel(char *str) { get_option(&str, &mminit_loglevel); diff --git a/mm/nobootmem.c b/mm/nobootmem.c index 5258386fa1be..e57cf24babd6 100644 --- a/mm/nobootmem.c +++ b/mm/nobootmem.c @@ -86,7 +86,7 @@ void __init free_bootmem_late(unsigned long addr, unsigned long size) end = PFN_DOWN(addr + size); for (; cursor < end; cursor++) { - __free_pages_bootmem(pfn_to_page(cursor), 0); + __free_pages_bootmem(pfn_to_page(cursor), cursor, 0); totalram_pages++; } } @@ -101,7 +101,7 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end) while (start + (1UL << order) > end) order--; - __free_pages_bootmem(pfn_to_page(start), order); + __free_pages_bootmem(pfn_to_page(start), start, order); start += (1UL << order); } @@ -130,6 +130,9 @@ static unsigned long __init free_low_memory_core_early(void) memblock_clear_hotplug(0, -1); + for_each_reserved_mem_region(i, &start, &end) + reserve_bootmem_region(start, end); + for_each_free_mem_range(i, NUMA_NO_NODE,
MEMBLOCK_NONE, &start, &end, NULL) count += __free_memory_core(start, end); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 5e6fa06f2784..506eac8b38af 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -18,6 +18,7 @@ #include <linux/mm.h> #include <linux/swap.h> #include <linux/interrupt.h> +#include <linux/rwsem.h> #include <linux/pagemap.h> #include <linux/jiffies.h> #include <linux/bootmem.h> @@ -61,6 +62,7 @@ #include <linux/hugetlb.h> #include <linux/sched/rt.h> #include <linux/page_owner.h> +#include <linux/kthread.h> #include <asm/sections.h> #include <asm/tlbflush.h> @@ -235,6 +237,77 @@ EXPORT_SYMBOL(nr_online_nodes); int page_group_by_mobility_disabled __read_mostly; +#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT +static inline void reset_deferred_meminit(pg_data_t *pgdat) +{ + pgdat->first_deferred_pfn = ULONG_MAX; +} + +/* Returns true if the struct page for the pfn is uninitialised */ +static inline bool __meminit early_page_uninitialised(unsigned long pfn) +{ + int nid = early_pfn_to_nid(pfn); + + if (pfn >= NODE_DATA(nid)->first_deferred_pfn) + return true; + + return false; +} + +static inline bool early_page_nid_uninitialised(unsigned long pfn, int nid) +{ + if (pfn >= NODE_DATA(nid)->first_deferred_pfn) + return true; + + return false; +} + +/* + * Returns false when the remaining initialisation should be deferred until + * later in the boot cycle when it can be parallelised. + */ +static inline bool update_defer_init(pg_data_t *pgdat, + unsigned long pfn, unsigned long zone_end, + unsigned long *nr_initialised) +{ + /* Always populate low zones for address-constrained allocations */ + if (zone_end < pgdat_end_pfn(pgdat)) + return true; + + /* Initialise at least 2G of the highest zone */ + (*nr_initialised)++; + if (*nr_initialised > (2UL << (30 - PAGE_SHIFT)) && + (pfn & (PAGES_PER_SECTION - 1)) == 0) { + pgdat->first_deferred_pfn = pfn; + return false; + } + + return true; +} +#else +static inline void reset_deferred_meminit(pg_data_t *pgdat) +{ +} + +static inline bool early_page_uninitialised(unsigned long pfn) +{ + return false; +} + +static inline bool early_page_nid_uninitialised(unsigned long pfn, int nid) +{ + return false; +} + +static inline bool update_defer_init(pg_data_t *pgdat, + unsigned long pfn, unsigned long zone_end, + unsigned long *nr_initialised) +{ + return true; +} +#endif + + void set_pageblock_migratetype(struct page *page, int migratetype) { if (unlikely(page_group_by_mobility_disabled && @@ -764,6 +837,75 @@ static int free_tail_pages_check(struct page *head_page, struct page *page) return 0; } +static void __meminit __init_single_page(struct page *page, unsigned long pfn, + unsigned long zone, int nid) +{ + set_page_links(page, zone, nid, pfn); + init_page_count(page); + page_mapcount_reset(page); + page_cpupid_reset_last(page); + + INIT_LIST_HEAD(&page->lru); +#ifdef WANT_PAGE_VIRTUAL + /* The shift won't overflow because ZONE_NORMAL is below 4G.
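+	 * For example, with 4K pages every pfn below 4G is below 0x100000,
+	 * so pfn << PAGE_SHIFT still fits in 32 bits (illustrative note,
+	 * not part of the original patch).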
*/ + if (!is_highmem_idx(zone)) + set_page_address(page, __va(pfn << PAGE_SHIFT)); +#endif +} + +static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone, + int nid) +{ + return __init_single_page(pfn_to_page(pfn), pfn, zone, nid); +} + +#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT +static void init_reserved_page(unsigned long pfn) +{ + pg_data_t *pgdat; + int nid, zid; + + if (!early_page_uninitialised(pfn)) + return; + + nid = early_pfn_to_nid(pfn); + pgdat = NODE_DATA(nid); + + for (zid = 0; zid < MAX_NR_ZONES; zid++) { + struct zone *zone = &pgdat->node_zones[zid]; + + if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone)) + break; + } + __init_single_pfn(pfn, zid, nid); +} +#else +static inline void init_reserved_page(unsigned long pfn) +{ +} +#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ + +/* + * Initialised pages do not have PageReserved set. This function is + * called for each range allocated by the bootmem allocator and + * marks the pages PageReserved. The remaining valid pages are later + * sent to the buddy page allocator. + */ +void __meminit reserve_bootmem_region(unsigned long start, unsigned long end) +{ + unsigned long start_pfn = PFN_DOWN(start); + unsigned long end_pfn = PFN_UP(end); + + for (; start_pfn < end_pfn; start_pfn++) { + if (pfn_valid(start_pfn)) { + struct page *page = pfn_to_page(start_pfn); + + init_reserved_page(start_pfn); + SetPageReserved(page); + } + } +} + static bool free_pages_prepare(struct page *page, unsigned int order) { bool compound = PageCompound(page); @@ -818,7 +960,8 @@ static void __free_pages_ok(struct page *page, unsigned int order) local_irq_restore(flags); } -void __init __free_pages_bootmem(struct page *page, unsigned int order) +static void __init __free_pages_boot_core(struct page *page, + unsigned long pfn, unsigned int order) { unsigned int nr_pages = 1 << order; struct page *p = page; @@ -838,6 +981,223 @@ void __init __free_pages_bootmem(struct page *page, unsigned int order) __free_pages(page, order); } +#if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \ + defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) +/* Only safe to use early in boot when initialisation is single-threaded */ +static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata; + +int __meminit early_pfn_to_nid(unsigned long pfn) +{ + int nid; + + /* The system will behave unpredictably otherwise */ + BUG_ON(system_state != SYSTEM_BOOTING); + + nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache); + if (nid >= 0) + return nid; + /* just returns 0 */ + return 0; +} +#endif + +#ifdef CONFIG_NODES_SPAN_OTHER_NODES +static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node, + struct mminit_pfnnid_cache *state) +{ + int nid; + + nid = __early_pfn_to_nid(pfn, state); + if (nid >= 0 && nid != node) + return false; + return true; +} + +/* Only safe to use early in boot when initialisation is single-threaded */ +static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node) +{ + return meminit_pfn_in_nid(pfn, node, &early_pfnnid_cache); +} + +#else + +static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node) +{ + return true; +} +static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node, + struct mminit_pfnnid_cache *state) +{ + return true; +} +#endif + + +void __init __free_pages_bootmem(struct page *page, unsigned long pfn, + unsigned int order) +{ + if (early_page_uninitialised(pfn)) + return; + return __free_pages_boot_core(page, pfn, order); +} + +#ifdef 
CONFIG_DEFERRED_STRUCT_PAGE_INIT +static void __init deferred_free_range(struct page *page, + unsigned long pfn, int nr_pages) +{ + int i; + + if (!page) + return; + + /* Free a large naturally-aligned chunk if possible */ + if (nr_pages == MAX_ORDER_NR_PAGES && + (pfn & (MAX_ORDER_NR_PAGES-1)) == 0) { + set_pageblock_migratetype(page, MIGRATE_MOVABLE); + __free_pages_boot_core(page, pfn, MAX_ORDER-1); + return; + } + + for (i = 0; i < nr_pages; i++, page++, pfn++) + __free_pages_boot_core(page, pfn, 0); +} + +static __initdata DECLARE_RWSEM(pgdat_init_rwsem); + +/* Initialise remaining memory on a node */ +static int __init deferred_init_memmap(void *data) +{ + pg_data_t *pgdat = data; + int nid = pgdat->node_id; + struct mminit_pfnnid_cache nid_init_state = { }; + unsigned long start = jiffies; + unsigned long nr_pages = 0; + unsigned long walk_start, walk_end; + int i, zid; + struct zone *zone; + unsigned long first_init_pfn = pgdat->first_deferred_pfn; + const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); + + if (first_init_pfn == ULONG_MAX) { + up_read(&pgdat_init_rwsem); + return 0; + } + + /* Bind memory initialisation thread to a local node if possible */ + if (!cpumask_empty(cpumask)) + set_cpus_allowed_ptr(current, cpumask); + + /* Sanity check boundaries */ + BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn); + BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat)); + pgdat->first_deferred_pfn = ULONG_MAX; + + /* Only the highest zone is deferred so find it */ + for (zid = 0; zid < MAX_NR_ZONES; zid++) { + zone = pgdat->node_zones + zid; + if (first_init_pfn < zone_end_pfn(zone)) + break; + } + + for_each_mem_pfn_range(i, nid, &walk_start, &walk_end, NULL) { + unsigned long pfn, end_pfn; + struct page *page = NULL; + struct page *free_base_page = NULL; + unsigned long free_base_pfn = 0; + int nr_to_free = 0; + + end_pfn = min(walk_end, zone_end_pfn(zone)); + pfn = first_init_pfn; + if (pfn < walk_start) + pfn = walk_start; + if (pfn < zone->zone_start_pfn) + pfn = zone->zone_start_pfn; + + for (; pfn < end_pfn; pfn++) { + if (!pfn_valid_within(pfn)) + goto free_range; + + /* + * Ensure pfn_valid is checked every + * MAX_ORDER_NR_PAGES for memory holes + */ + if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) { + if (!pfn_valid(pfn)) { + page = NULL; + goto free_range; + } + } + + if (!meminit_pfn_in_nid(pfn, nid, &nid_init_state)) { + page = NULL; + goto free_range; + } + + /* Minimise pfn page lookups and scheduler checks */ + if (page && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0) { + page++; + } else { + nr_pages += nr_to_free; + deferred_free_range(free_base_page, + free_base_pfn, nr_to_free); + free_base_page = NULL; + free_base_pfn = nr_to_free = 0; + + page = pfn_to_page(pfn); + cond_resched(); + } + + if (page->flags) { + VM_BUG_ON(page_zone(page) != zone); + goto free_range; + } + + __init_single_page(page, pfn, zid, nid); + if (!free_base_page) { + free_base_page = page; + free_base_pfn = pfn; + nr_to_free = 0; + } + nr_to_free++; + + /* Where possible, batch up pages for a single free */ + continue; +free_range: + /* Free the current block of pages to allocator */ + nr_pages += nr_to_free; + deferred_free_range(free_base_page, free_base_pfn, + nr_to_free); + free_base_page = NULL; + free_base_pfn = nr_to_free = 0; + } + + first_init_pfn = max(end_pfn, first_init_pfn); + } + + /* Sanity check that the next zone really is unpopulated */ + WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone)); + + pr_info("node %d initialised, %lu pages in %ums\n", nid, 
nr_pages, + jiffies_to_msecs(jiffies - start)); + up_read(&pgdat_init_rwsem); + return 0; +} + +void __init page_alloc_init_late(void) +{ + int nid; + + for_each_node_state(nid, N_MEMORY) { + down_read(&pgdat_init_rwsem); + kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid); + } + + /* Block until all are initialised */ + down_write(&pgdat_init_rwsem); + up_write(&pgdat_init_rwsem); +} +#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ + #ifdef CONFIG_CMA /* Free whole pageblock and set its migration type to MIGRATE_CMA. */ void __init init_cma_reserved_pageblock(struct page *page) @@ -4150,6 +4510,9 @@ static void setup_zone_migrate_reserve(struct zone *zone) zone->nr_migrate_reserve_block = reserve; for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { + if (!early_page_nid_uninitialised(pfn, zone_to_nid(zone))) + return; + if (!pfn_valid(pfn)) continue; page = pfn_to_page(pfn); @@ -4212,15 +4575,16 @@ static void setup_zone_migrate_reserve(struct zone *zone) void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, unsigned long start_pfn, enum memmap_context context) { - struct page *page; + pg_data_t *pgdat = NODE_DATA(nid); unsigned long end_pfn = start_pfn + size; unsigned long pfn; struct zone *z; + unsigned long nr_initialised = 0; if (highest_memmap_pfn < end_pfn - 1) highest_memmap_pfn = end_pfn - 1; - z = &NODE_DATA(nid)->node_zones[zone]; + z = &pgdat->node_zones[zone]; for (pfn = start_pfn; pfn < end_pfn; pfn++) { /* * There can be holes in boot-time mem_map[]s @@ -4232,14 +4596,11 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, continue; if (!early_pfn_in_nid(pfn, nid)) continue; + if (!update_defer_init(pgdat, pfn, end_pfn, + &nr_initialised)) + break; } - page = pfn_to_page(pfn); - set_page_links(page, zone, nid, pfn); - mminit_verify_page_links(page, zone, nid, pfn); - init_page_count(page); - page_mapcount_reset(page); - page_cpupid_reset_last(page); - SetPageReserved(page); + /* * Mark the block movable so that blocks are reserved for * movable at startup. This will force kernel allocations @@ -4254,17 +4615,14 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, * check here not to call set_pageblock_migratetype() against * pfn out of zone. */ - if ((z->zone_start_pfn <= pfn) - && (pfn < zone_end_pfn(z)) - && !(pfn & (pageblock_nr_pages - 1))) - set_pageblock_migratetype(page, MIGRATE_MOVABLE); + if (!(pfn & (pageblock_nr_pages - 1))) { + struct page *page = pfn_to_page(pfn); - INIT_LIST_HEAD(&page->lru); -#ifdef WANT_PAGE_VIRTUAL - /* The shift won't overflow because ZONE_NORMAL is below 4G. */ - if (!is_highmem_idx(zone)) - set_page_address(page, __va(pfn << PAGE_SHIFT)); -#endif + __init_single_page(page, pfn, zone, nid); + set_pageblock_migratetype(page, MIGRATE_MOVABLE); + } else { + __init_single_pfn(pfn, zone, nid); + } } } @@ -4522,57 +4880,30 @@ int __meminit init_currently_empty_zone(struct zone *zone, #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID + /* * Required by SPARSEMEM. Given a PFN, return what node the PFN is on. */ -int __meminit __early_pfn_to_nid(unsigned long pfn) +int __meminit __early_pfn_to_nid(unsigned long pfn, + struct mminit_pfnnid_cache *state) { unsigned long start_pfn, end_pfn; int nid; - /* - * NOTE: The following SMP-unsafe globals are only used early in boot - * when the kernel is running single-threaded. 
- */ - static unsigned long __meminitdata last_start_pfn, last_end_pfn; - static int __meminitdata last_nid; - if (last_start_pfn <= pfn && pfn < last_end_pfn) - return last_nid; + if (state->last_start <= pfn && pfn < state->last_end) + return state->last_nid; nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn); if (nid != -1) { - last_start_pfn = start_pfn; - last_end_pfn = end_pfn; - last_nid = nid; + state->last_start = start_pfn; + state->last_end = end_pfn; + state->last_nid = nid; } return nid; } #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */ -int __meminit early_pfn_to_nid(unsigned long pfn) -{ - int nid; - - nid = __early_pfn_to_nid(pfn); - if (nid >= 0) - return nid; - /* just returns 0 */ - return 0; -} - -#ifdef CONFIG_NODES_SPAN_OTHER_NODES -bool __meminit early_pfn_in_nid(unsigned long pfn, int node) -{ - int nid; - - nid = __early_pfn_to_nid(pfn); - if (nid >= 0 && nid != node) - return false; - return true; -} -#endif - /** * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed. @@ -5090,6 +5421,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size, /* pg_data_t should be reset to zero when it's allocated */ WARN_ON(pgdat->nr_zones || pgdat->classzone_idx); + reset_deferred_meminit(pgdat); pgdat->node_id = nid; pgdat->node_start_pfn = node_start_pfn; #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP diff --git a/scripts/gdb/linux/dmesg.py b/scripts/gdb/linux/dmesg.py index 3c947f0c5dad..927d0d2a3145 100644 --- a/scripts/gdb/linux/dmesg.py +++ b/scripts/gdb/linux/dmesg.py @@ -12,7 +12,6 @@ # import gdb -import string from linux import utils diff --git a/scripts/gdb/linux/lists.py b/scripts/gdb/linux/lists.py new file mode 100644 index 000000000000..3a3775bc162b --- /dev/null +++ b/scripts/gdb/linux/lists.py @@ -0,0 +1,92 @@ +# +# gdb helper commands and functions for Linux kernel debugging +# +# list tools +# +# Copyright (c) Thiebaud Weksteen, 2015 +# +# Authors: +# Thiebaud Weksteen <thiebaud@weksteen.fr> +# +# This work is licensed under the terms of the GNU GPL version 2. 
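+#
+# Usage sketch from a gdb session (the list head argument below is just
+# an example symbol; any struct list_head or pointer to one works, and
+# the node count shown is made up):
+#
+#   (gdb) lx-list-check &modules
+#   list is consistent: 42 node(s)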
+# + +import gdb + +from linux import utils + +list_head = utils.CachedType("struct list_head") + + +def list_check(head): + nb = 0 + if (head.type == list_head.get_type().pointer()): + head = head.dereference() + elif (head.type != list_head.get_type()): + raise gdb.GdbError('argument must be of type (struct list_head [*])') + c = head + try: + gdb.write("Starting with: {}\n".format(c)) + except gdb.MemoryError: + gdb.write('head is not accessible\n') + return + while True: + p = c['prev'].dereference() + n = c['next'].dereference() + try: + if p['next'] != c.address: + gdb.write('prev.next != current: ' + 'current@{current_addr}={current} ' + 'prev@{p_addr}={p}\n'.format( + current_addr=c.address, + current=c, + p_addr=p.address, + p=p, + )) + return + except gdb.MemoryError: + gdb.write('prev is not accessible: ' + 'current@{current_addr}={current}\n'.format( + current_addr=c.address, + current=c + )) + return + try: + if n['prev'] != c.address: + gdb.write('next.prev != current: ' + 'current@{current_addr}={current} ' + 'next@{n_addr}={n}\n'.format( + current_addr=c.address, + current=c, + n_addr=n.address, + n=n, + )) + return + except gdb.MemoryError: + gdb.write('next is not accessible: ' + 'current@{current_addr}={current}\n'.format( + current_addr=c.address, + current=c + )) + return + c = n + nb += 1 + if c == head: + gdb.write("list is consistent: {} node(s)\n".format(nb)) + return + + +class LxListChk(gdb.Command): + """Verify a list consistency""" + + def __init__(self): + super(LxListChk, self).__init__("lx-list-check", gdb.COMMAND_DATA, + gdb.COMPLETE_EXPRESSION) + + def invoke(self, arg, from_tty): + argv = gdb.string_to_argv(arg) + if len(argv) != 1: + raise gdb.GdbError("lx-list-check takes one argument") + list_check(gdb.parse_and_eval(argv[0])) + +LxListChk() diff --git a/scripts/gdb/linux/symbols.py b/scripts/gdb/linux/symbols.py index cd5bea965d4e..627750cb420d 100644 --- a/scripts/gdb/linux/symbols.py +++ b/scripts/gdb/linux/symbols.py @@ -14,9 +14,8 @@ import gdb import os import re -import string -from linux import modules, utils +from linux import modules if hasattr(gdb, 'Breakpoint'): @@ -97,7 +96,7 @@ lx-symbols command.""" return "" attrs = sect_attrs['attrs'] section_name_to_address = { - attrs[n]['name'].string() : attrs[n]['address'] + attrs[n]['name'].string(): attrs[n]['address'] for n in range(int(sect_attrs['nsections']))} args = [] for section_name in [".data", ".data..read_mostly", ".rodata", ".bss"]: @@ -124,7 +123,7 @@ lx-symbols command.""" addr=module_addr, sections=self._section_arguments(module)) gdb.execute(cmdline, to_string=True) - if not module_name in self.loaded_modules: + if module_name not in self.loaded_modules: self.loaded_modules.append(module_name) else: gdb.write("no module object found for '{0}'\n".format(module_name)) @@ -164,7 +163,7 @@ lx-symbols command.""" self.load_all_symbols() if hasattr(gdb, 'Breakpoint'): - if not self.breakpoint is None: + if self.breakpoint is not None: self.breakpoint.delete() self.breakpoint = None self.breakpoint = LoadModuleBreakpoint( diff --git a/scripts/gdb/linux/tasks.py b/scripts/gdb/linux/tasks.py index e2037d9bb7eb..862a4ae24d49 100644 --- a/scripts/gdb/linux/tasks.py +++ b/scripts/gdb/linux/tasks.py @@ -18,8 +18,8 @@ from linux import utils task_type = utils.CachedType("struct task_struct") + def task_lists(): - global task_type task_ptr_type = task_type.get_type().pointer() init_task = gdb.parse_and_eval("init_task").address t = g = init_task @@ -38,6 +38,7 @@ def task_lists(): if t == 
init_task: return + def get_task_by_pid(pid): for task in task_lists(): if int(task['pid']) == pid: @@ -65,13 +66,28 @@ return that task_struct variable which PID matches.""" LxTaskByPidFunc() +class LxPs(gdb.Command): + """Dump Linux tasks.""" + + def __init__(self): + super(LxPs, self).__init__("lx-ps", gdb.COMMAND_DATA) + + def invoke(self, arg, from_tty): + for task in task_lists(): + gdb.write("{address} {pid} {comm}\n".format( + address=task, + pid=task["pid"], + comm=task["comm"].string())) + +LxPs() + + thread_info_type = utils.CachedType("struct thread_info") ia64_task_size = None def get_thread_info(task): - global thread_info_type thread_info_ptr_type = thread_info_type.get_type().pointer() if utils.is_target_arch("ia64"): global ia64_task_size diff --git a/scripts/gdb/linux/utils.py b/scripts/gdb/linux/utils.py index 128c306db3ee..0893b326a28b 100644 --- a/scripts/gdb/linux/utils.py +++ b/scripts/gdb/linux/utils.py @@ -83,7 +83,7 @@ def get_target_endianness(): elif "big endian" in endian: target_endianness = BIG_ENDIAN else: - raise gdb.GdgError("unknown endianness '{0}'".format(str(endian))) + raise gdb.GdbError("unknown endianness '{0}'".format(str(endian))) return target_endianness @@ -151,6 +151,6 @@ def get_gdbserver_type(): gdbserver_type = GDBSERVER_QEMU elif probe_kgdb(): gdbserver_type = GDBSERVER_KGDB - if not gdbserver_type is None and hasattr(gdb, 'events'): + if gdbserver_type is not None and hasattr(gdb, 'events'): gdb.events.exited.connect(exit_handler) return gdbserver_type diff --git a/scripts/gdb/vmlinux-gdb.py b/scripts/gdb/vmlinux-gdb.py index 48489285f119..ce82bf5c3943 100644 --- a/scripts/gdb/vmlinux-gdb.py +++ b/scripts/gdb/vmlinux-gdb.py @@ -28,3 +28,4 @@ else: import linux.dmesg import linux.tasks import linux.cpus + import linux.lists diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c index 082509eb805d..f05cb6a8cbe0 100644 --- a/sound/core/memalloc.c +++ b/sound/core/memalloc.c @@ -124,7 +124,7 @@ static void snd_malloc_dev_iram(struct snd_dma_buffer *dmab, size_t size) dmab->addr = 0; if (dev->of_node) - pool = of_get_named_gen_pool(dev->of_node, "iram", 0); + pool = of_gen_pool_get(dev->of_node, "iram", 0); if (!pool) return;
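
For reference, a minimal sketch of a consumer of the renamed genalloc API, modelled on the sound/core/memalloc.c hunk above. The "iram" property name comes from that hunk; the helper itself, its device-tree node argument and the requested size are assumptions for illustration, not part of this merge, and error handling is trimmed:

#include <linux/genalloc.h>
#include <linux/of.h>

/* Hypothetical helper: resolve the gen_pool behind the "iram" phandle
 * property of np and carve a DMA-capable chunk out of it. */
static void *iram_alloc(struct device_node *np, size_t size, dma_addr_t *dma)
{
	struct gen_pool *pool = of_gen_pool_get(np, "iram", 0);

	if (!pool || gen_pool_avail(pool) < size)
		return NULL;

	return gen_pool_dma_alloc(pool, size, dma);
}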