Diffstat (limited to 'mm')
-rw-r--r--  mm/gup_benchmark.c  |  2
-rw-r--r--  mm/huge_memory.c    |  4
-rw-r--r--  mm/hugetlb.c        |  3
-rw-r--r--  mm/percpu-stats.c   |  2
-rw-r--r--  mm/slab.c           |  3
-rw-r--r--  mm/slub.c           | 19
-rw-r--r--  mm/swap_slots.c     |  4
-rw-r--r--  mm/swap_state.c     |  2
-rw-r--r--  mm/swapfile.c       |  5
9 files changed, 25 insertions, 19 deletions
diff --git a/mm/gup_benchmark.c b/mm/gup_benchmark.c
index 0f44759486e2..6a473709e9b6 100644
--- a/mm/gup_benchmark.c
+++ b/mm/gup_benchmark.c
@@ -23,7 +23,7 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
 	struct page **pages;
 
 	nr_pages = gup->size / PAGE_SIZE;
-	pages = kvzalloc(sizeof(void *) * nr_pages, GFP_KERNEL);
+	pages = kvcalloc(nr_pages, sizeof(void *), GFP_KERNEL);
 	if (!pages)
 		return -ENOMEM;
 
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index ba8fdc0b6e7f..1cd7c1a57a14 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1131,8 +1131,8 @@ static int do_huge_pmd_wp_page_fallback(struct vm_fault *vmf, pmd_t orig_pmd,
 	unsigned long mmun_start;	/* For mmu_notifiers */
 	unsigned long mmun_end;		/* For mmu_notifiers */
 
-	pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
-			GFP_KERNEL);
+	pages = kmalloc_array(HPAGE_PMD_NR, sizeof(struct page *),
+			      GFP_KERNEL);
 	if (unlikely(!pages)) {
 		ret |= VM_FAULT_OOM;
 		goto out;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 696befffe6f7..3612fbb32e9d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2798,7 +2798,8 @@ static int __init hugetlb_init(void)
 	num_fault_mutexes = 1;
 #endif
 	hugetlb_fault_mutex_table =
-		kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL);
+		kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
+			      GFP_KERNEL);
 	BUG_ON(!hugetlb_fault_mutex_table);
 
 	for (i = 0; i < num_fault_mutexes; i++)
diff --git a/mm/percpu-stats.c b/mm/percpu-stats.c
index 063ff60ecd90..b5fdd43b60c9 100644
--- a/mm/percpu-stats.c
+++ b/mm/percpu-stats.c
@@ -144,7 +144,7 @@ alloc_buffer:
 	spin_unlock_irq(&pcpu_lock);
 
 	/* there can be at most this many free and allocated fragments */
-	buffer = vmalloc((2 * max_nr_alloc + 1) * sizeof(int));
+	buffer = vmalloc(array_size(sizeof(int), (2 * max_nr_alloc + 1)));
 	if (!buffer)
 		return -ENOMEM;
 
diff --git a/mm/slab.c b/mm/slab.c
index 36688f6c87eb..aa76a70e087e 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4338,7 +4338,8 @@ static int leaks_show(struct seq_file *m, void *p)
 	if (x[0] == x[1]) {
 		/* Increase the buffer size */
 		mutex_unlock(&slab_mutex);
-		m->private = kzalloc(x[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
+		m->private = kcalloc(x[0] * 4, sizeof(unsigned long),
+				     GFP_KERNEL);
 		if (!m->private) {
 			/* Too bad, we are really out */
 			m->private = x;
diff --git a/mm/slub.c b/mm/slub.c
index 15505479c3ab..a3b8467c14af 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3623,8 +3623,9 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 #ifdef CONFIG_SLUB_DEBUG
 	void *addr = page_address(page);
 	void *p;
-	unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
-				     sizeof(long), GFP_ATOMIC);
+	unsigned long *map = kcalloc(BITS_TO_LONGS(page->objects),
+				     sizeof(long),
+				     GFP_ATOMIC);
 	if (!map)
 		return;
 	slab_err(s, page, text, s->name);
@@ -4412,8 +4413,9 @@ static long validate_slab_cache(struct kmem_cache *s)
 {
 	int node;
 	unsigned long count = 0;
-	unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
-				sizeof(unsigned long), GFP_KERNEL);
+	unsigned long *map = kmalloc_array(BITS_TO_LONGS(oo_objects(s->max)),
+					   sizeof(unsigned long),
+					   GFP_KERNEL);
 	struct kmem_cache_node *n;
 
 	if (!map)
@@ -4573,8 +4575,9 @@ static int list_locations(struct kmem_cache *s, char *buf,
 	unsigned long i;
 	struct loc_track t = { 0, 0, NULL };
 	int node;
-	unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
-				     sizeof(unsigned long), GFP_KERNEL);
+	unsigned long *map = kmalloc_array(BITS_TO_LONGS(oo_objects(s->max)),
+					   sizeof(unsigned long),
+					   GFP_KERNEL);
 	struct kmem_cache_node *n;
 
 	if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
@@ -4750,7 +4753,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 	int x;
 	unsigned long *nodes;
 
-	nodes = kzalloc(sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
+	nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL);
 	if (!nodes)
 		return -ENOMEM;
 
@@ -5293,7 +5296,7 @@ static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
 	unsigned long sum = 0;
 	int cpu;
 	int len;
-	int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
+	int *data = kmalloc_array(nr_cpu_ids, sizeof(int), GFP_KERNEL);
 
 	if (!data)
 		return -ENOMEM;
diff --git a/mm/swap_slots.c b/mm/swap_slots.c
index f51ac051c0c9..a791411fed71 100644
--- a/mm/swap_slots.c
+++ b/mm/swap_slots.c
@@ -122,12 +122,12 @@ static int alloc_swap_slot_cache(unsigned int cpu)
 	 * as kvzalloc could trigger reclaim and get_swap_page,
 	 * which can lock swap_slots_cache_mutex.
 	 */
-	slots = kvzalloc(sizeof(swp_entry_t) * SWAP_SLOTS_CACHE_SIZE,
+	slots = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t),
 			 GFP_KERNEL);
 	if (!slots)
 		return -ENOMEM;
 
-	slots_ret = kvzalloc(sizeof(swp_entry_t) * SWAP_SLOTS_CACHE_SIZE,
+	slots_ret = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t),
 			     GFP_KERNEL);
 	if (!slots_ret) {
 		kvfree(slots);
diff --git a/mm/swap_state.c b/mm/swap_state.c
index ab8e59cd18ea..ecee9c6c4cc1 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -620,7 +620,7 @@ int init_swap_address_space(unsigned int type, unsigned long nr_pages)
 	unsigned int i, nr;
 
 	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
-	spaces = kvzalloc(sizeof(struct address_space) * nr, GFP_KERNEL);
+	spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
 	if (!spaces)
 		return -ENOMEM;
 	for (i = 0; i < nr; i++) {
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 78a015fcec3b..925cf795a652 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -3196,7 +3196,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
 		p->cluster_next = 1 + (prandom_u32() % p->highest_bit);
 		nr_cluster = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
 
-		cluster_info = kvzalloc(nr_cluster * sizeof(*cluster_info),
+		cluster_info = kvcalloc(nr_cluster, sizeof(*cluster_info),
 					GFP_KERNEL);
 		if (!cluster_info) {
 			error = -ENOMEM;
@@ -3233,7 +3233,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
 	}
 	/* frontswap enabled? set up bit-per-page map for frontswap */
 	if (IS_ENABLED(CONFIG_FRONTSWAP))
-		frontswap_map = kvzalloc(BITS_TO_LONGS(maxpages) * sizeof(long),
+		frontswap_map = kvcalloc(BITS_TO_LONGS(maxpages),
+					 sizeof(long),
 					 GFP_KERNEL);
 
 	if (p->bdev &&(swap_flags & SWAP_FLAG_DISCARD) && swap_discardable(p)) {
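Every hunk above applies the same transformation: an allocation whose size is an open-coded multiplication (sizeof(x) * n) is switched to a two-argument array allocator (kmalloc_array(), kcalloc(), kvcalloc()) or wrapped in array_size(), so an overflowing multiplication saturates and the allocation fails with NULL instead of silently returning an undersized buffer. Zeroing semantics are preserved: kzalloc()/kvzalloc() become kcalloc()/kvcalloc(), while plain kmalloc()/vmalloc() stay non-zeroing. Below is a minimal userspace sketch of the idea, assuming GCC/Clang's __builtin_mul_overflow() as a stand-in for the kernel's check_mul_overflow(); the alloc_array() helper is hypothetical and only illustrates the pattern, it is not a kernel API.

/* overflow_demo.c - userspace sketch of the kmalloc_array()/kcalloc() idea.
 * alloc_array() is a hypothetical helper; in the kernel the checked
 * multiply lives in include/linux/overflow.h.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

static void *alloc_array(size_t count, size_t size)
{
        size_t bytes;

        /* GCC/Clang builtin: returns true (and we refuse) on overflow. */
        if (__builtin_mul_overflow(count, size, &bytes))
                return NULL;
        return malloc(bytes);
}

int main(void)
{
        /* An attacker-influenced count: nr * sizeof(uint64_t) wraps to 0. */
        size_t nr = SIZE_MAX / 8 + 1;

        void *unchecked = malloc(nr * sizeof(uint64_t)); /* wrapped size: 0 bytes */
        void *checked = alloc_array(nr, sizeof(uint64_t)); /* overflow detected: NULL */

        printf("unchecked=%p checked=%p\n", unchecked, checked);
        free(unchecked);
        free(checked);
        return 0;
}

Compiled with a recent GCC or Clang, the unchecked call typically succeeds with a zero-byte buffer that later writes would overrun, while the checked call returns NULL, which is the failure mode these mm/ conversions are meant to rule out.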