author     Dennis Zhou (Facebook) <dennisszhou@gmail.com>   2017-07-25 01:02:19 +0200
committer  Tejun Heo <tj@kernel.org>                        2017-07-26 23:41:06 +0200
commit     b4c2116cfae65b09761b7ba34453733e745a6f77 (patch)
tree       301f2c2ed922ec0acd862bf042c20a397213bf8e /mm
parent     percpu: use metadata blocks to update the chunk contig hint (diff)
percpu: update pcpu_find_block_fit to use an iterator
The simple, and expensive, way to find a free area is to iterate over
the entire bitmap until an area is found that fits the allocation size
and alignment. This patch makes use of an iterator that finds areas to
check by using the block-level contig hints. It only returns an area
that can fit the size and alignment request. If the request can fit
inside a block, it returns the block's first_free bit as the offset to
start checking from, so the request may be fulfilled prior to the
contig hint. The pcpu_alloc_area check has a bound of one block size
added to its search window in case the hint is wrong.

Signed-off-by: Dennis Zhou <dennisszhou@gmail.com>
Reviewed-by: Josef Bacik <jbacik@fb.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/percpu.c | 112
1 file changed, 92 insertions(+), 20 deletions(-)
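To make the hint-driven skip concrete before the diff, the following is a
minimal, standalone C sketch of the idea in the commit message. The
block_md struct, the find_fit() helper, and the 64-bit block size are
simplified stand-ins (not the kernel's pcpu_block_md or
PCPU_BITMAP_BLOCK_BITS), and it omits the cross-block
left_free/right_free accounting that the real iterator performs.

#include <stdio.h>

#define BLOCK_BITS 64   /* stand-in for PCPU_BITMAP_BLOCK_BITS */
#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))  /* a: power of two */

struct block_md {               /* simplified stand-in for pcpu_block_md */
        int contig_hint;        /* size of the largest known free run */
        int contig_hint_start;  /* offset of that run within the block */
        int first_free;         /* first free bit in the block */
};

/*
 * Return the index of the first block whose contig hint can satisfy an
 * aligned request, or -1. Alignment slack is charged against the hint,
 * mirroring the ALIGN() check in pcpu_next_fit_region() below; a caller
 * would then begin its real scan at blocks[i].first_free.
 */
static int find_fit(const struct block_md *blocks, int nr,
                    int alloc_bits, int align)
{
        for (int i = 0; i < nr; i++) {
                int pad = ALIGN_UP(blocks[i].contig_hint_start, align) -
                          blocks[i].contig_hint_start;

                if (blocks[i].contig_hint >= pad + alloc_bits)
                        return i;
        }
        return -1;              /* no block-level hint fits */
}

int main(void)
{
        struct block_md blocks[] = {
                { .contig_hint = 3,  .contig_hint_start = 5,  .first_free = 1 },
                { .contig_hint = 16, .contig_hint_start = 12, .first_free = 0 },
        };

        /* 8 bits at 4-bit alignment: block 0 is skipped, block 1 fits */
        printf("fits in block %d\n", find_fit(blocks, 2, 8, 4));
        return 0;
}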
diff --git a/mm/percpu.c b/mm/percpu.c
index 0f05647d9547..b4754f3bf38f 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -355,10 +355,72 @@ static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off,
        }
}
+/**
+ * pcpu_next_fit_region - finds fit areas for a given allocation request
+ * @chunk: chunk of interest
+ * @alloc_bits: size of allocation
+ * @align: alignment of area (max PAGE_SIZE)
+ * @bit_off: chunk offset
+ * @bits: size of free area
+ *
+ * Finds the next free region that is viable for use with a given size and
+ * alignment. This only returns if there is a valid area to be used for this
+ * allocation. block->first_free is returned if the allocation request fits
+ * within the block to see if the request can be fulfilled prior to the contig
+ * hint.
+ */
+static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits,
+                                 int align, int *bit_off, int *bits)
+{
+        int i = pcpu_off_to_block_index(*bit_off);
+        int block_off = pcpu_off_to_block_off(*bit_off);
+        struct pcpu_block_md *block;
+
+        *bits = 0;
+        for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
+             block++, i++) {
+                /* handles contig area across blocks */
+                if (*bits) {
+                        *bits += block->left_free;
+                        if (*bits >= alloc_bits)
+                                return;
+                        if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
+                                continue;
+                }
+
+                /* check block->contig_hint */
+                *bits = ALIGN(block->contig_hint_start, align) -
+                        block->contig_hint_start;
+                /*
+                 * This uses the block offset to determine if this has been
+                 * checked in the prior iteration.
+                 */
+                if (block->contig_hint &&
+                    block->contig_hint_start >= block_off &&
+                    block->contig_hint >= *bits + alloc_bits) {
+                        *bits += alloc_bits + block->contig_hint_start -
+                                 block->first_free;
+                        *bit_off = pcpu_block_off_to_off(i, block->first_free);
+                        return;
+                }
+
+                *bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free,
+                                 align);
+                *bits = PCPU_BITMAP_BLOCK_BITS - *bit_off;
+                *bit_off = pcpu_block_off_to_off(i, *bit_off);
+                if (*bits >= alloc_bits)
+                        return;
+        }
+
+        /* no valid offsets were found - fail condition */
+        *bit_off = pcpu_chunk_map_bits(chunk);
+}
+
/*
* Metadata free area iterators. These perform aggregation of free areas
* based on the metadata blocks and return the offset @bit_off and size in
- * bits of the free area @bits.
+ * bits of the free area @bits. pcpu_for_each_fit_region only returns when
+ * a fit is found for the allocation request.
*/
#define pcpu_for_each_md_free_region(chunk, bit_off, bits)                  \
        for (pcpu_next_md_free_region((chunk), &(bit_off), &(bits));        \
@@ -366,6 +428,14 @@ static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off,
             (bit_off) += (bits) + 1,                                        \
             pcpu_next_md_free_region((chunk), &(bit_off), &(bits)))
+#define pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits)   \
+        for (pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
+                                  &(bits));                                 \
+             (bit_off) < pcpu_chunk_map_bits((chunk));                      \
+             (bit_off) += (bits),                                           \
+             pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
+                                  &(bits)))
+
/**
* pcpu_mem_zalloc - allocate memory
* @size: bytes to allocate
@@ -825,6 +895,14 @@ static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits,
* @align: alignment of area (max PAGE_SIZE bytes)
* @pop_only: use populated regions only
*
+ * Given a chunk and an allocation spec, find the offset to begin searching
+ * for a free region. This iterates over the bitmap metadata blocks to
+ * find an offset that will be guaranteed to fit the requirements. It is
+ * not quite first fit as if the allocation does not fit in the contig hint
+ * of a block or chunk, it is skipped. This errs on the side of caution
+ * to prevent excess iteration. Poor alignment can cause the allocator to
+ * skip over blocks and chunks that have valid free areas.
+ *
* RETURNS:
* The offset in the bitmap to begin searching.
* -1 if no offset is found.
@@ -832,8 +910,7 @@ static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits,
static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits,
                               size_t align, bool pop_only)
{
-        int bit_off, bits;
-        int re; /* region end */
+        int bit_off, bits, next_off;
/*
* Check to see if the allocation can fit in the chunk's contig hint.
@@ -846,22 +923,14 @@ static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits,
        if (bit_off + alloc_bits > chunk->contig_bits)
                return -1;
-        pcpu_for_each_unpop_region(chunk->alloc_map, bit_off, re,
-                                   chunk->first_bit,
-                                   pcpu_chunk_map_bits(chunk)) {
-                bits = re - bit_off;
-
-                /* check alignment */
-                bits -= ALIGN(bit_off, align) - bit_off;
-                bit_off = ALIGN(bit_off, align);
-                if (bits < alloc_bits)
-                        continue;
-
-                bits = alloc_bits;
+        bit_off = chunk->first_bit;
+        bits = 0;
+        pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) {
                if (!pop_only || pcpu_is_populated(chunk, bit_off, bits,
-                                                   &bit_off))
+                                                   &next_off))
                        break;
+                bit_off = next_off;
                bits = 0;
        }
@@ -879,9 +948,12 @@ static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits,
* @start: bit_off to start searching
*
* This function takes in a @start offset to begin searching to fit an
- * allocation of @alloc_bits with alignment @align. If it confirms a
- * valid free area, it then updates the allocation and boundary maps
- * accordingly.
+ * allocation of @alloc_bits with alignment @align. It needs to scan
+ * the allocation map because if it fits within the block's contig hint,
+ * @start will be block->first_free. This is an attempt to fill the
+ * allocation prior to breaking the contig hint. The allocation and
+ * boundary maps are updated accordingly if it confirms a valid
+ * free area.
*
* RETURNS:
* Allocated addr offset in @chunk on success.
@@ -900,7 +972,7 @@ static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits,
        /*
         * Search to find a fit.
         */
-        end = start + alloc_bits;
+        end = start + alloc_bits + PCPU_BITMAP_BLOCK_BITS;
        bit_off = bitmap_find_next_zero_area(chunk->alloc_map, end, start,
                                             alloc_bits, align_mask);
        if (bit_off >= end)
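The PCPU_BITMAP_BLOCK_BITS slack added to end above is the bound the
commit message describes: if block->first_free turns out to be a poor
starting point, bitmap_find_next_zero_area() scans at most one extra
block before pcpu_alloc_area() fails the attempt. Below is a toy model
of that bounded window; the byte array and the find_zero_run() helper
are invented stand-ins for the kernel's bitmap and
bitmap_find_next_zero_area(), with an 8-bit block for brevity.

#include <stdio.h>

#define BLOCK_BITS 8    /* tiny stand-in for PCPU_BITMAP_BLOCK_BITS */

/*
 * Find alloc_bits consecutive zero entries in map[start, end), or
 * return -1. One byte models one bit of the real allocation map
 * (1 = allocated).
 */
static int find_zero_run(const char *map, int end, int start, int alloc_bits)
{
        for (int off = start; off + alloc_bits <= end; off++) {
                int run_clear = 1;

                for (int i = 0; i < alloc_bits; i++) {
                        if (map[off + i]) {
                                run_clear = 0;
                                break;
                        }
                }
                if (run_clear)
                        return off;
        }
        return -1;      /* nothing inside the bounded window */
}

int main(void)
{
        /* bits 0-2 are taken even though the hint said to start at 0 */
        char map[16] = { 1, 1, 1, 0, 0, 0, 0, 0,
                         0, 0, 0, 0, 0, 0, 0, 0 };
        int start = 0, alloc_bits = 4;
        int end = start + alloc_bits + BLOCK_BITS;  /* bounded window */

        printf("found at %d (window ends at %d)\n",
               find_zero_run(map, end, start, alloc_bits), end);
        return 0;
}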