author    Michel Dänzer <michel.daenzer@amd.com>      2017-11-03 16:00:35 +0100
committer Alex Deucher <alexander.deucher@amd.com>    2017-11-04 14:48:28 +0100
commit    767601d100a53e653233aebca7c262ce0addfa99 (patch)
tree      080c5ad6705b39c91df0ddee9c0658409c93130f /drivers/gpu/drm/ttm/ttm_page_alloc.c
parent    drm/ttm: Always and only destroy bo->ttm_resv in ttm_bo_release_list (diff)
drm/ttm: Downgrade pr_err to pr_debug for memory allocation failures
Memory allocation failure should generally be handled gracefully by
callers. In particular, with transparent hugepage support, attempts to
allocate huge pages can fail under memory pressure, but the callers fall
back to allocating individual pages instead. In that case, there would
be spurious

    [TTM] Unable to get page %u

error messages in dmesg.

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
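The fallback pattern the message describes looks roughly like the sketch
below. This is not the actual TTM code and the helper name is hypothetical;
it only illustrates why the failure path warrants pr_debug() rather than
pr_err(): the huge-page attempt is expected to fail under memory pressure,
and the caller recovers by dropping down to order-0 pages.

/*
 * Minimal sketch, assuming CONFIG_TRANSPARENT_HUGEPAGE. The helper
 * alloc_huge_or_single() is hypothetical, not part of TTM.
 */
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/printk.h>

static struct page *alloc_huge_or_single(gfp_t gfp_flags)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct page *p;

	/* Try a huge page first; __GFP_NOWARN suppresses the core MM warning. */
	p = alloc_pages(gfp_flags | __GFP_NOWARN, HPAGE_PMD_ORDER);
	if (p)
		return p;

	/* Expected under memory pressure: note it quietly, then fall back. */
	pr_debug("huge page allocation failed, using single pages\n");
#endif
	return alloc_pages(gfp_flags, 0);
}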
Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_page_alloc.c')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c | 13 ++++++-------
1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 4d688c8d7853..316f831ad5f0 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -329,7 +329,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
GFP_KERNEL);
if (!pages_to_free) {
- pr_err("Failed to allocate memory for pool free operation\n");
+ pr_debug("Failed to allocate memory for pool free operation\n");
return 0;
}
@@ -517,7 +517,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
if (!caching_array) {
- pr_err("Unable to allocate table for new pages\n");
+ pr_debug("Unable to allocate table for new pages\n");
return -ENOMEM;
}
@@ -525,7 +525,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
p = alloc_pages(gfp_flags, order);
if (!p) {
- pr_err("Unable to get page %u\n", i);
+ pr_debug("Unable to get page %u\n", i);
/* store already allocated pages in the pool after
* setting the caching state */
@@ -625,7 +625,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, int ttm_flags,
++pool->nrefills;
pool->npages += alloc_size;
} else {
- pr_err("Failed to fill pool (%p)\n", pool);
+ pr_debug("Failed to fill pool (%p)\n", pool);
/* If we have any pages left put them to the pool. */
list_for_each_entry(p, &new_pages, lru) {
++cpages;
@@ -885,8 +885,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
while (npages) {
p = alloc_page(gfp_flags);
if (!p) {
-
- pr_err("Unable to allocate page\n");
+ pr_debug("Unable to allocate page\n");
return -ENOMEM;
}
@@ -925,7 +924,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
/* If there is any pages in the list put them back to
* the pool.
*/
- pr_err("Failed to allocate extra pages for large request\n");
+ pr_debug("Failed to allocate extra pages for large request\n");
ttm_put_pages(pages, count, flags, cstate);
return r;
}
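Note that pr_debug() calls are compiled out unless DEBUG is defined for the
file or CONFIG_DYNAMIC_DEBUG is enabled. With dynamic debug, the downgraded
messages can still be turned on at runtime, e.g. via

    echo 'file ttm_page_alloc.c +p' > /sys/kernel/debug/dynamic_debug/control

so the information remains available when diagnosing allocation behaviour
under memory pressure, without polluting dmesg by default.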