Diffstat (limited to 'drivers/gpu/drm/vmwgfx')
33 files changed, 1105 insertions, 225 deletions
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile index cc4cdca7176e..8c02fa5852e7 100644 --- a/drivers/gpu/drm/vmwgfx/Makefile +++ b/drivers/gpu/drm/vmwgfx/Makefile @@ -9,7 +9,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \ vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \ vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \ vmwgfx_validation.o vmwgfx_page_dirty.o vmwgfx_streamoutput.o \ - ttm_object.o ttm_lock.o + ttm_object.o ttm_lock.o ttm_memory.o vmwgfx-$(CONFIG_TRANSPARENT_HUGEPAGE) += vmwgfx_thp.o obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o diff --git a/drivers/gpu/drm/vmwgfx/ttm_memory.c b/drivers/gpu/drm/vmwgfx/ttm_memory.c new file mode 100644 index 000000000000..aeb0a22a2c34 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/ttm_memory.c @@ -0,0 +1,682 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/************************************************************************** + * + * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + **************************************************************************/ + +#define pr_fmt(fmt) "[TTM] " fmt + +#include <linux/spinlock.h> +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/swap.h> + +#include <drm/drm_device.h> +#include <drm/drm_file.h> +#include <drm/ttm/ttm_device.h> + +#include "ttm_memory.h" + +#define TTM_MEMORY_ALLOC_RETRIES 4 + +struct ttm_mem_global ttm_mem_glob; +EXPORT_SYMBOL(ttm_mem_glob); + +struct ttm_mem_zone { + struct kobject kobj; + struct ttm_mem_global *glob; + const char *name; + uint64_t zone_mem; + uint64_t emer_mem; + uint64_t max_mem; + uint64_t swap_limit; + uint64_t used_mem; +}; + +static struct attribute ttm_mem_sys = { + .name = "zone_memory", + .mode = S_IRUGO +}; +static struct attribute ttm_mem_emer = { + .name = "emergency_memory", + .mode = S_IRUGO | S_IWUSR +}; +static struct attribute ttm_mem_max = { + .name = "available_memory", + .mode = S_IRUGO | S_IWUSR +}; +static struct attribute ttm_mem_swap = { + .name = "swap_limit", + .mode = S_IRUGO | S_IWUSR +}; +static struct attribute ttm_mem_used = { + .name = "used_memory", + .mode = S_IRUGO +}; + +static void ttm_mem_zone_kobj_release(struct kobject *kobj) +{ + struct ttm_mem_zone *zone = + container_of(kobj, struct ttm_mem_zone, kobj); + + pr_info("Zone %7s: Used memory at exit: %llu KiB\n", + zone->name, (unsigned long long)zone->used_mem >> 10); + kfree(zone); +} + +static ssize_t ttm_mem_zone_show(struct kobject *kobj, + struct attribute *attr, + char *buffer) +{ + struct ttm_mem_zone *zone = + container_of(kobj, struct ttm_mem_zone, kobj); + uint64_t val = 0; + + spin_lock(&zone->glob->lock); + if (attr == &ttm_mem_sys) + val = zone->zone_mem; + else if (attr == &ttm_mem_emer) + val = zone->emer_mem; + else if (attr == &ttm_mem_max) + val = zone->max_mem; + else if (attr == &ttm_mem_swap) + val = zone->swap_limit; + else if (attr == &ttm_mem_used) + val = zone->used_mem; + spin_unlock(&zone->glob->lock); + + return snprintf(buffer, PAGE_SIZE, "%llu\n", + (unsigned long long) val >> 10); +} + +static void ttm_check_swapping(struct ttm_mem_global *glob); + +static ssize_t ttm_mem_zone_store(struct kobject *kobj, + struct attribute *attr, + const char *buffer, + size_t size) +{ + struct ttm_mem_zone *zone = + container_of(kobj, struct ttm_mem_zone, kobj); + int chars; + unsigned long val; + uint64_t val64; + + chars = sscanf(buffer, "%lu", &val); + if (chars == 0) + return size; + + val64 = val; + val64 <<= 10; + + spin_lock(&zone->glob->lock); + if (val64 > zone->zone_mem) + val64 = zone->zone_mem; + if (attr == &ttm_mem_emer) { + zone->emer_mem = val64; + if (zone->max_mem > val64) + zone->max_mem = val64; + } else if (attr == &ttm_mem_max) { + zone->max_mem = val64; + if (zone->emer_mem < val64) + zone->emer_mem = val64; + } else if (attr == &ttm_mem_swap) + zone->swap_limit = val64; + spin_unlock(&zone->glob->lock); + + ttm_check_swapping(zone->glob); + + return size; +} + +static struct attribute *ttm_mem_zone_attrs[] = { + &ttm_mem_sys, + &ttm_mem_emer, + &ttm_mem_max, + &ttm_mem_swap, + &ttm_mem_used, + NULL +}; + +static const struct sysfs_ops ttm_mem_zone_ops = { + .show = &ttm_mem_zone_show, + .store = &ttm_mem_zone_store +}; + +static struct kobj_type ttm_mem_zone_kobj_type = { + .release = &ttm_mem_zone_kobj_release, + .sysfs_ops = &ttm_mem_zone_ops, + .default_attrs = ttm_mem_zone_attrs, +}; + +static struct attribute ttm_mem_global_lower_mem_limit = { + .name = 
"lower_mem_limit", + .mode = S_IRUGO | S_IWUSR +}; + +static ssize_t ttm_mem_global_show(struct kobject *kobj, + struct attribute *attr, + char *buffer) +{ + struct ttm_mem_global *glob = + container_of(kobj, struct ttm_mem_global, kobj); + uint64_t val = 0; + + spin_lock(&glob->lock); + val = glob->lower_mem_limit; + spin_unlock(&glob->lock); + /* convert from number of pages to KB */ + val <<= (PAGE_SHIFT - 10); + return snprintf(buffer, PAGE_SIZE, "%llu\n", + (unsigned long long) val); +} + +static ssize_t ttm_mem_global_store(struct kobject *kobj, + struct attribute *attr, + const char *buffer, + size_t size) +{ + int chars; + uint64_t val64; + unsigned long val; + struct ttm_mem_global *glob = + container_of(kobj, struct ttm_mem_global, kobj); + + chars = sscanf(buffer, "%lu", &val); + if (chars == 0) + return size; + + val64 = val; + /* convert from KB to number of pages */ + val64 >>= (PAGE_SHIFT - 10); + + spin_lock(&glob->lock); + glob->lower_mem_limit = val64; + spin_unlock(&glob->lock); + + return size; +} + +static struct attribute *ttm_mem_global_attrs[] = { + &ttm_mem_global_lower_mem_limit, + NULL +}; + +static const struct sysfs_ops ttm_mem_global_ops = { + .show = &ttm_mem_global_show, + .store = &ttm_mem_global_store, +}; + +static struct kobj_type ttm_mem_glob_kobj_type = { + .sysfs_ops = &ttm_mem_global_ops, + .default_attrs = ttm_mem_global_attrs, +}; + +static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob, + bool from_wq, uint64_t extra) +{ + unsigned int i; + struct ttm_mem_zone *zone; + uint64_t target; + + for (i = 0; i < glob->num_zones; ++i) { + zone = glob->zones[i]; + + if (from_wq) + target = zone->swap_limit; + else if (capable(CAP_SYS_ADMIN)) + target = zone->emer_mem; + else + target = zone->max_mem; + + target = (extra > target) ? 0ULL : target; + + if (zone->used_mem > target) + return true; + } + return false; +} + +/* + * At this point we only support a single shrink callback. + * Extend this if needed, perhaps using a linked list of callbacks. + * Note that this function is reentrant: + * many threads may try to swap out at any given time. 
+ */ + +static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq, + uint64_t extra, struct ttm_operation_ctx *ctx) +{ + int ret; + + spin_lock(&glob->lock); + + while (ttm_zones_above_swap_target(glob, from_wq, extra)) { + spin_unlock(&glob->lock); + ret = ttm_global_swapout(ctx, GFP_KERNEL); + spin_lock(&glob->lock); + if (unlikely(ret <= 0)) + break; + } + + spin_unlock(&glob->lock); +} + +static void ttm_shrink_work(struct work_struct *work) +{ + struct ttm_operation_ctx ctx = { + .interruptible = false, + .no_wait_gpu = false + }; + struct ttm_mem_global *glob = + container_of(work, struct ttm_mem_global, work); + + ttm_shrink(glob, true, 0ULL, &ctx); +} + +static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob, + const struct sysinfo *si) +{ + struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL); + uint64_t mem; + int ret; + + if (unlikely(!zone)) + return -ENOMEM; + + mem = si->totalram - si->totalhigh; + mem *= si->mem_unit; + + zone->name = "kernel"; + zone->zone_mem = mem; + zone->max_mem = mem >> 1; + zone->emer_mem = (mem >> 1) + (mem >> 2); + zone->swap_limit = zone->max_mem - (mem >> 3); + zone->used_mem = 0; + zone->glob = glob; + glob->zone_kernel = zone; + ret = kobject_init_and_add( + &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name); + if (unlikely(ret != 0)) { + kobject_put(&zone->kobj); + return ret; + } + glob->zones[glob->num_zones++] = zone; + return 0; +} + +#ifdef CONFIG_HIGHMEM +static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob, + const struct sysinfo *si) +{ + struct ttm_mem_zone *zone; + uint64_t mem; + int ret; + + if (si->totalhigh == 0) + return 0; + + zone = kzalloc(sizeof(*zone), GFP_KERNEL); + if (unlikely(!zone)) + return -ENOMEM; + + mem = si->totalram; + mem *= si->mem_unit; + + zone->name = "highmem"; + zone->zone_mem = mem; + zone->max_mem = mem >> 1; + zone->emer_mem = (mem >> 1) + (mem >> 2); + zone->swap_limit = zone->max_mem - (mem >> 3); + zone->used_mem = 0; + zone->glob = glob; + glob->zone_highmem = zone; + ret = kobject_init_and_add( + &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", + zone->name); + if (unlikely(ret != 0)) { + kobject_put(&zone->kobj); + return ret; + } + glob->zones[glob->num_zones++] = zone; + return 0; +} +#else +static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob, + const struct sysinfo *si) +{ + struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL); + uint64_t mem; + int ret; + + if (unlikely(!zone)) + return -ENOMEM; + + mem = si->totalram; + mem *= si->mem_unit; + + /** + * No special dma32 zone needed. + */ + + if (mem <= ((uint64_t) 1ULL << 32)) { + kfree(zone); + return 0; + } + + /* + * Limit max dma32 memory to 4GB for now + * until we can figure out how big this + * zone really is. 
+ */ + + mem = ((uint64_t) 1ULL << 32); + zone->name = "dma32"; + zone->zone_mem = mem; + zone->max_mem = mem >> 1; + zone->emer_mem = (mem >> 1) + (mem >> 2); + zone->swap_limit = zone->max_mem - (mem >> 3); + zone->used_mem = 0; + zone->glob = glob; + glob->zone_dma32 = zone; + ret = kobject_init_and_add( + &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name); + if (unlikely(ret != 0)) { + kobject_put(&zone->kobj); + return ret; + } + glob->zones[glob->num_zones++] = zone; + return 0; +} +#endif + +int ttm_mem_global_init(struct ttm_mem_global *glob, struct device *dev) +{ + struct sysinfo si; + int ret; + int i; + struct ttm_mem_zone *zone; + + spin_lock_init(&glob->lock); + glob->swap_queue = create_singlethread_workqueue("ttm_swap"); + INIT_WORK(&glob->work, ttm_shrink_work); + + ret = kobject_init_and_add(&glob->kobj, &ttm_mem_glob_kobj_type, + &dev->kobj, "memory_accounting"); + if (unlikely(ret != 0)) { + kobject_put(&glob->kobj); + return ret; + } + + si_meminfo(&si); + + /* set it as 0 by default to keep original behavior of OOM */ + glob->lower_mem_limit = 0; + + ret = ttm_mem_init_kernel_zone(glob, &si); + if (unlikely(ret != 0)) + goto out_no_zone; +#ifdef CONFIG_HIGHMEM + ret = ttm_mem_init_highmem_zone(glob, &si); + if (unlikely(ret != 0)) + goto out_no_zone; +#else + ret = ttm_mem_init_dma32_zone(glob, &si); + if (unlikely(ret != 0)) + goto out_no_zone; +#endif + for (i = 0; i < glob->num_zones; ++i) { + zone = glob->zones[i]; + pr_info("Zone %7s: Available graphics memory: %llu KiB\n", + zone->name, (unsigned long long)zone->max_mem >> 10); + } + return 0; +out_no_zone: + ttm_mem_global_release(glob); + return ret; +} + +void ttm_mem_global_release(struct ttm_mem_global *glob) +{ + struct ttm_mem_zone *zone; + unsigned int i; + + flush_workqueue(glob->swap_queue); + destroy_workqueue(glob->swap_queue); + glob->swap_queue = NULL; + for (i = 0; i < glob->num_zones; ++i) { + zone = glob->zones[i]; + kobject_del(&zone->kobj); + kobject_put(&zone->kobj); + } + kobject_del(&glob->kobj); + kobject_put(&glob->kobj); + memset(glob, 0, sizeof(*glob)); +} + +static void ttm_check_swapping(struct ttm_mem_global *glob) +{ + bool needs_swapping = false; + unsigned int i; + struct ttm_mem_zone *zone; + + spin_lock(&glob->lock); + for (i = 0; i < glob->num_zones; ++i) { + zone = glob->zones[i]; + if (zone->used_mem > zone->swap_limit) { + needs_swapping = true; + break; + } + } + + spin_unlock(&glob->lock); + + if (unlikely(needs_swapping)) + (void)queue_work(glob->swap_queue, &glob->work); + +} + +static void ttm_mem_global_free_zone(struct ttm_mem_global *glob, + struct ttm_mem_zone *single_zone, + uint64_t amount) +{ + unsigned int i; + struct ttm_mem_zone *zone; + + spin_lock(&glob->lock); + for (i = 0; i < glob->num_zones; ++i) { + zone = glob->zones[i]; + if (single_zone && zone != single_zone) + continue; + zone->used_mem -= amount; + } + spin_unlock(&glob->lock); +} + +void ttm_mem_global_free(struct ttm_mem_global *glob, + uint64_t amount) +{ + return ttm_mem_global_free_zone(glob, glob->zone_kernel, amount); +} +EXPORT_SYMBOL(ttm_mem_global_free); + +/* + * check if the available mem is under lower memory limit + * + * a. if no swap disk at all or free swap space is under swap_mem_limit + * but available system mem is bigger than sys_mem_limit, allow TTM + * allocation; + * + * b. if the available system mem is less than sys_mem_limit but free + * swap disk is bigger than swap_mem_limit, allow TTM allocation. 
+ */ +bool +ttm_check_under_lowerlimit(struct ttm_mem_global *glob, + uint64_t num_pages, + struct ttm_operation_ctx *ctx) +{ + int64_t available; + + /* We allow over commit during suspend */ + if (ctx->force_alloc) + return false; + + available = get_nr_swap_pages() + si_mem_available(); + available -= num_pages; + if (available < glob->lower_mem_limit) + return true; + + return false; +} + +static int ttm_mem_global_reserve(struct ttm_mem_global *glob, + struct ttm_mem_zone *single_zone, + uint64_t amount, bool reserve) +{ + uint64_t limit; + int ret = -ENOMEM; + unsigned int i; + struct ttm_mem_zone *zone; + + spin_lock(&glob->lock); + for (i = 0; i < glob->num_zones; ++i) { + zone = glob->zones[i]; + if (single_zone && zone != single_zone) + continue; + + limit = (capable(CAP_SYS_ADMIN)) ? + zone->emer_mem : zone->max_mem; + + if (zone->used_mem > limit) + goto out_unlock; + } + + if (reserve) { + for (i = 0; i < glob->num_zones; ++i) { + zone = glob->zones[i]; + if (single_zone && zone != single_zone) + continue; + zone->used_mem += amount; + } + } + + ret = 0; +out_unlock: + spin_unlock(&glob->lock); + ttm_check_swapping(glob); + + return ret; +} + + +static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob, + struct ttm_mem_zone *single_zone, + uint64_t memory, + struct ttm_operation_ctx *ctx) +{ + int count = TTM_MEMORY_ALLOC_RETRIES; + + while (unlikely(ttm_mem_global_reserve(glob, + single_zone, + memory, true) + != 0)) { + if (ctx->no_wait_gpu) + return -ENOMEM; + if (unlikely(count-- == 0)) + return -ENOMEM; + ttm_shrink(glob, false, memory + (memory >> 2) + 16, ctx); + } + + return 0; +} + +int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory, + struct ttm_operation_ctx *ctx) +{ + /** + * Normal allocations of kernel memory are registered in + * the kernel zone. + */ + + return ttm_mem_global_alloc_zone(glob, glob->zone_kernel, memory, ctx); +} +EXPORT_SYMBOL(ttm_mem_global_alloc); + +int ttm_mem_global_alloc_page(struct ttm_mem_global *glob, + struct page *page, uint64_t size, + struct ttm_operation_ctx *ctx) +{ + struct ttm_mem_zone *zone = NULL; + + /** + * Page allocations may be registered in a single zone + * only if highmem or !dma32.
+ */ + +#ifdef CONFIG_HIGHMEM + if (PageHighMem(page) && glob->zone_highmem != NULL) + zone = glob->zone_highmem; +#else + if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL) + zone = glob->zone_kernel; +#endif + return ttm_mem_global_alloc_zone(glob, zone, size, ctx); +} + +void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page, + uint64_t size) +{ + struct ttm_mem_zone *zone = NULL; + +#ifdef CONFIG_HIGHMEM + if (PageHighMem(page) && glob->zone_highmem != NULL) + zone = glob->zone_highmem; +#else + if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL) + zone = glob->zone_kernel; +#endif + ttm_mem_global_free_zone(glob, zone, size); +} + +size_t ttm_round_pot(size_t size) +{ + if ((size & (size - 1)) == 0) + return size; + else if (size > PAGE_SIZE) + return PAGE_ALIGN(size); + else { + size_t tmp_size = 4; + + while (tmp_size < size) + tmp_size <<= 1; + + return tmp_size; + } + return 0; +} +EXPORT_SYMBOL(ttm_round_pot); diff --git a/drivers/gpu/drm/vmwgfx/ttm_memory.h b/drivers/gpu/drm/vmwgfx/ttm_memory.h new file mode 100644 index 000000000000..c50dba774485 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/ttm_memory.h @@ -0,0 +1,96 @@ +/************************************************************************** + * + * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ + +#ifndef TTM_MEMORY_H +#define TTM_MEMORY_H + +#include <linux/workqueue.h> +#include <linux/spinlock.h> +#include <linux/bug.h> +#include <linux/wait.h> +#include <linux/errno.h> +#include <linux/kobject.h> +#include <linux/mm.h> + +#include <drm/ttm/ttm_bo_api.h> + +/** + * struct ttm_mem_global - Global memory accounting structure. + * + * @shrink: A single callback to shrink TTM memory usage. Extend this + * to a linked list to be able to handle multiple callbacks when needed. + * @swap_queue: A workqueue to handle shrinking in low memory situations. We + * need a separate workqueue since it will spend a lot of time waiting + * for the GPU, and this will otherwise block other workqueue tasks(?) + * At this point we use only a single-threaded workqueue. + * @work: The workqueue callback for the shrink queue. + * @lock: Lock to protect the @shrink - and the memory accounting members, + * that is, essentially the whole structure with some exceptions. 
+ * @lower_mem_limit: include lower limit of swap space and lower limit of + * system memory. + * @zones: Array of pointers to accounting zones. + * @num_zones: Number of populated entries in the @zones array. + * @zone_kernel: Pointer to the kernel zone. + * @zone_highmem: Pointer to the highmem zone if there is one. + * @zone_dma32: Pointer to the dma32 zone if there is one. + * + * Note that this structure is not per device. It should be global for all + * graphics devices. + */ + +#define TTM_MEM_MAX_ZONES 2 +struct ttm_mem_zone; +extern struct ttm_mem_global { + struct kobject kobj; + struct workqueue_struct *swap_queue; + struct work_struct work; + spinlock_t lock; + uint64_t lower_mem_limit; + struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES]; + unsigned int num_zones; + struct ttm_mem_zone *zone_kernel; +#ifdef CONFIG_HIGHMEM + struct ttm_mem_zone *zone_highmem; +#else + struct ttm_mem_zone *zone_dma32; +#endif +} ttm_mem_glob; + +int ttm_mem_global_init(struct ttm_mem_global *glob, struct device *dev); +void ttm_mem_global_release(struct ttm_mem_global *glob); +int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory, + struct ttm_operation_ctx *ctx); +void ttm_mem_global_free(struct ttm_mem_global *glob, uint64_t amount); +int ttm_mem_global_alloc_page(struct ttm_mem_global *glob, + struct page *page, uint64_t size, + struct ttm_operation_ctx *ctx); +void ttm_mem_global_free_page(struct ttm_mem_global *glob, + struct page *page, uint64_t size); +size_t ttm_round_pot(size_t size); +bool ttm_check_under_lowerlimit(struct ttm_mem_global *glob, uint64_t num_pages, + struct ttm_operation_ctx *ctx); +#endif diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.c b/drivers/gpu/drm/vmwgfx/ttm_object.c index 0fe869d0fad1..112394dd0ab6 100644 --- a/drivers/gpu/drm/vmwgfx/ttm_object.c +++ b/drivers/gpu/drm/vmwgfx/ttm_object.c @@ -42,6 +42,14 @@ */ +#define pr_fmt(fmt) "[TTM] " fmt + +#include <linux/list.h> +#include <linux/spinlock.h> +#include <linux/slab.h> +#include <linux/atomic.h> +#include "ttm_object.h" + /** * struct ttm_object_file * @@ -55,16 +63,9 @@ * * @ref_hash: Hash tables of ref objects, one per ttm_ref_type, * for fast lookup of ref objects given a base object. + * + * @refcount: reference/usage count */ - -#define pr_fmt(fmt) "[TTM] " fmt - -#include <linux/list.h> -#include <linux/spinlock.h> -#include <linux/slab.h> -#include <linux/atomic.h> -#include "ttm_object.h" - struct ttm_object_file { struct ttm_object_device *tdev; spinlock_t lock; @@ -73,7 +74,7 @@ struct ttm_object_file { struct kref refcount; }; -/** +/* * struct ttm_object_device * * @object_lock: lock that protects the object_hash hash table. @@ -96,7 +97,7 @@ struct ttm_object_device { struct idr idr; }; -/** +/* * struct ttm_ref_object * * @hash: Hash entry for the per-file object reference hash. @@ -568,7 +569,7 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev) /** * get_dma_buf_unless_doomed - get a dma_buf reference if possible. * - * @dma_buf: Non-refcounted pointer to a struct dma-buf. + * @dmabuf: Non-refcounted pointer to a struct dma-buf. 
* * Obtain a file reference from a lookup structure that doesn't refcount * the file, but synchronizes with its release method to make sure it has diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.h b/drivers/gpu/drm/vmwgfx/ttm_object.h index ede26df87c93..49b064f0cb19 100644 --- a/drivers/gpu/drm/vmwgfx/ttm_object.h +++ b/drivers/gpu/drm/vmwgfx/ttm_object.h @@ -43,7 +43,8 @@ #include <linux/rcupdate.h> #include <drm/drm_hashtab.h> -#include <drm/ttm/ttm_memory.h> + +#include "ttm_memory.h" /** * enum ttm_ref_type diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c index 180f6dbc9460..81f525a82b77 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c @@ -330,6 +330,8 @@ static void vmw_binding_drop(struct vmw_ctx_bindinfo *bi) * * @cbs: Pointer to the context binding state tracker. * @bi: Information about the binding to track. + * @shader_slot: The shader slot of the binding. + * @slot: The slot of the binding. * * Starts tracking the binding in the context binding * state structure @cbs. @@ -367,6 +369,7 @@ void vmw_binding_add_uav_index(struct vmw_ctx_binding_state *cbs, uint32 slot, * vmw_binding_transfer: Transfer a context binding tracking entry. * * @cbs: Pointer to the persistent context binding state tracker. + * @from: Staged binding info built during execbuf * @bi: Information about the binding to track. * */ @@ -484,9 +487,8 @@ void vmw_binding_res_list_scrub(struct list_head *head) /** * vmw_binding_state_commit - Commit staged binding info * - * @ctx: Pointer to context to commit the staged binding info to. + * @to: Staged binding info area to copy into. * @from: Staged binding info built during execbuf. - * @scrubbed: Transfer only scrubbed bindings. * * Transfers binding info from a temporary structure * (typically used by execbuf) to the persistent @@ -511,7 +513,7 @@ void vmw_binding_state_commit(struct vmw_ctx_binding_state *to, /** * vmw_binding_rebind_all - Rebind all scrubbed bindings of a context * - * @ctx: The context resource + * @cbs: Pointer to the context binding state tracker. * * Walks through the context binding list and rebinds all scrubbed * resources. @@ -789,6 +791,7 @@ static void vmw_collect_dirty_view_ids(struct vmw_ctx_binding_state *cbs, * vmw_binding_emit_set_sr - Issue delayed DX shader resource binding commands * * @cbs: Pointer to the context's struct vmw_ctx_binding_state + * @shader_slot: The shader slot of the binding. */ static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs, int shader_slot) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c index 9f2779ddcf08..3a438ae4d3f4 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c @@ -431,6 +431,7 @@ static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d, * @src_stride: Source stride in bytes. * @w: Width of blit. * @h: Height of blit. + * @diff: The struct vmw_diff_cpy used to track the modified bounding box. * return: Zero on success. Negative error value on failure. Will print out * kernel warnings on caller bugs.
* @@ -465,13 +466,13 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst, dma_resv_assert_held(src->base.resv); if (!ttm_tt_is_populated(dst->ttm)) { - ret = dst->bdev->driver->ttm_tt_populate(dst->bdev, dst->ttm, &ctx); + ret = dst->bdev->funcs->ttm_tt_populate(dst->bdev, dst->ttm, &ctx); if (ret) return ret; } if (!ttm_tt_is_populated(src->ttm)) { - ret = src->bdev->driver->ttm_tt_populate(src->bdev, src->ttm, &ctx); + ret = src->bdev->funcs->ttm_tt_populate(src->bdev, src->ttm, &ctx); if (ret) return ret; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c index 63dbc44eebe0..50e529a01677 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c @@ -131,7 +131,6 @@ err: * * @dev_priv: Driver private. * @buf: DMA buffer to move. - * @pin: Pin buffer if true. * @interruptible: Use interruptible wait. * Return: Zero on success, Negative error code on failure. In particular * -ERESTARTSYS if interrupted by a signal @@ -508,11 +507,16 @@ int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size, acc_size = ttm_round_pot(sizeof(*bo)); acc_size += ttm_round_pot(npages * sizeof(void *)); acc_size += ttm_round_pot(sizeof(struct ttm_tt)); + + ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx); + if (unlikely(ret)) + goto error_free; + ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, size, ttm_bo_type_device, placement, 0, - &ctx, acc_size, NULL, NULL, NULL); + &ctx, NULL, NULL, NULL); if (unlikely(ret)) - goto error_free; + goto error_account; ttm_bo_pin(bo); ttm_bo_unreserve(bo); @@ -520,6 +524,9 @@ int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size, return 0; +error_account: + ttm_mem_global_free(&ttm_mem_glob, acc_size); + error_free: kfree(bo); return ret; @@ -546,7 +553,7 @@ int vmw_bo_init(struct vmw_private *dev_priv, void (*bo_free)(struct ttm_buffer_object *bo)) { struct ttm_operation_ctx ctx = { interruptible, false }; - struct ttm_bo_device *bdev = &dev_priv->bdev; + struct ttm_device *bdev = &dev_priv->bdev; size_t acc_size; int ret; bool user = (bo_free == &vmw_user_bo_destroy); @@ -559,11 +566,17 @@ int vmw_bo_init(struct vmw_private *dev_priv, vmw_bo->base.priority = 3; vmw_bo->res_tree = RB_ROOT; + ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx); + if (unlikely(ret)) + return ret; + ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, size, ttm_bo_type_device, placement, - 0, &ctx, acc_size, NULL, NULL, bo_free); - if (unlikely(ret)) + 0, &ctx, NULL, NULL, bo_free); + if (unlikely(ret)) { + ttm_mem_global_free(&ttm_mem_glob, acc_size); return ret; + } if (pin) ttm_bo_pin(&vmw_bo->base); @@ -635,6 +648,7 @@ static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base, * @handle: Pointer to where the handle value should be assigned. * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer * should be assigned. + * @p_base: The TTM base object pointer about to be allocated. * Return: Zero on success, negative error code on error. 
*/ int vmw_user_bo_alloc(struct vmw_private *dev_priv, @@ -1058,7 +1072,7 @@ int vmw_user_bo_reference(struct ttm_object_file *tfile, void vmw_bo_fence_single(struct ttm_buffer_object *bo, struct vmw_fence_obj *fence) { - struct ttm_bo_device *bdev = bo->bdev; + struct ttm_device *bdev = bo->bdev; struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c index 7400d617ae3c..20246a7c97c9 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c @@ -276,7 +276,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv, return ret; } -/** +/* * Reserve @bytes number of bytes in the fifo. * * This function will return NULL (error) on two conditions: diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c index 45fbc41440f1..2e23e537cdf5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c @@ -48,6 +48,7 @@ * @hw_submitted: List of command buffers submitted to hardware. * @preempted: List of preempted command buffers. * @num_hw_submitted: Number of buffers currently being processed by hardware + * @block_submission: Identifies a block command submission. */ struct vmw_cmdbuf_context { struct list_head submitted; @@ -58,7 +59,7 @@ struct vmw_cmdbuf_context { }; /** - * struct vmw_cmdbuf_man: - Command buffer manager + * struct vmw_cmdbuf_man - Command buffer manager * * @cur_mutex: Mutex protecting the command buffer used for incremental small * kernel command submissions, @cur. @@ -88,7 +89,7 @@ struct vmw_cmdbuf_context { * @max_hw_submitted: Max number of in-flight command buffers the device can * handle. Immutable. * @lock: Spinlock protecting command submission queues. - * @header: Pool of DMA memory for device command buffer headers. + * @headers: Pool of DMA memory for device command buffer headers. * Internal protection. * @dheaders: Pool of DMA memory for device command buffer headers with trailing * space for inline data. Internal protection. @@ -143,7 +144,7 @@ struct vmw_cmdbuf_man { * @cb_context: The device command buffer context. * @list: List head for attaching to the manager lists. * @node: The range manager node. - * @handle. The DMA address of @cb_header. Handed to the device on command + * @handle: The DMA address of @cb_header. Handed to the device on command * buffer submission. * @cmd: Pointer to the command buffer space of this buffer. * @size: Size of the command buffer space of this buffer. @@ -249,7 +250,7 @@ static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header) * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its * associated structures. * - * header: Pointer to the header to free. + * @header: Pointer to the header to free. * * For internal use. Must be called with man::lock held. */ @@ -365,10 +366,11 @@ static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man, } /** - * vmw_cmdbuf_ctx_submit: Process a command buffer context. + * vmw_cmdbuf_ctx_process - Process a command buffer context. * * @man: The command buffer manager. * @ctx: The command buffer context. + * @notempty: Pass back count of non-empty command submitted lists. * * Submit command buffers to hardware if possible, and process finished * buffers. Typically freeing them, but on preemption or error take @@ -1161,6 +1163,7 @@ static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man, * context. * * @man: The command buffer manager. 
+ * @context: Device context to pass command through. * * Synchronously sends a preempt command. */ @@ -1184,6 +1187,7 @@ static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context) * context. * * @man: The command buffer manager. + * @context: Device context to start/stop. * @enable: Whether to enable or disable the context. * * Synchronously sends a device start / stop context command. diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c index 44d858ce4ce7..b262d61d839d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c @@ -69,7 +69,7 @@ struct vmw_cmdbuf_res_manager { * vmw_cmdbuf_res_lookup - Look up a command buffer resource * * @man: Pointer to the command buffer resource manager - * @resource_type: The resource type, that combined with the user key + * @res_type: The resource type, that combined with the user key * identifies the resource. * @user_key: The user key. * @@ -148,7 +148,6 @@ void vmw_cmdbuf_res_commit(struct list_head *list) /** * vmw_cmdbuf_res_revert - Revert a list of command buffer resource actions * - * @man: Pointer to the command buffer resource manager * @list: Caller's list of command buffer resource action * * This function reverts a list of command buffer resource @@ -160,7 +159,6 @@ void vmw_cmdbuf_res_commit(struct list_head *list) void vmw_cmdbuf_res_revert(struct list_head *list) { struct vmw_cmdbuf_res *entry, *next; - int ret; list_for_each_entry_safe(entry, next, list, head) { switch (entry->state) { @@ -168,8 +166,7 @@ void vmw_cmdbuf_res_revert(struct list_head *list) vmw_cmdbuf_res_free(entry->man, entry); break; case VMW_CMDBUF_RES_DEL: - ret = drm_ht_insert_item(&entry->man->resources, - &entry->hash); + drm_ht_insert_item(&entry->man->resources, &entry->hash); list_del(&entry->head); list_add_tail(&entry->head, &entry->man->list); entry->state = VMW_CMDBUF_RES_COMMITTED; @@ -327,7 +324,6 @@ void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man) } /** - * * vmw_cmdbuf_res_man_size - Return the size of a command buffer managed * resource manager * diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c index 6f4d0da11ad8..4a5a3e246216 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c @@ -112,7 +112,7 @@ static const struct vmw_res_func vmw_dx_context_func = { .unbind = vmw_dx_context_unbind }; -/** +/* * Context management: */ @@ -672,7 +672,7 @@ static int vmw_dx_context_destroy(struct vmw_resource *res) return 0; } -/** +/* * User-space context management: */ @@ -698,7 +698,7 @@ static void vmw_user_context_free(struct vmw_resource *res) vmw_user_context_size); } -/** +/* * This function is called when user space has no more references on the * base object. It releases the base-object's reference on the resource object. */ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c index ba658fa9cf6c..d782b49c7236 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c @@ -63,6 +63,7 @@ struct vmw_cotable { * @min_initial_entries: Min number of initial intries at cotable allocation * for this cotable type. * @size: Size of each entry. + * @unbind_func: Unbind call-back function. */ struct vmw_cotable_info { u32 min_initial_entries; @@ -297,7 +298,7 @@ int vmw_cotable_scrub(struct vmw_resource *res, bool readback) * * @res: Pointer to the cotable resource. 
* @readback: Whether to read back cotable data to the backup buffer. - * val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller + * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller * for convenience / fencing. * * Unbinds the cotable from the device and fences the backup buffer. @@ -481,11 +482,15 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size) vmw_bo_unreference(&old_buf); res->id = vcotbl->type; + /* Release the pin acquired in vmw_bo_init */ + ttm_bo_unpin(bo); + return 0; out_map_new: ttm_bo_kunmap(&old_map); out_wait: + ttm_bo_unpin(bo); ttm_bo_unreserve(bo); vmw_bo_unreference(&buf); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index dd69b51c40e4..399f70d340eb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -47,12 +47,6 @@ #define VMW_MIN_INITIAL_WIDTH 800 #define VMW_MIN_INITIAL_HEIGHT 600 -#ifndef VMWGFX_GIT_VERSION -#define VMWGFX_GIT_VERSION "Unknown" -#endif - -#define VMWGFX_REPO "In Tree" - #define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE) @@ -153,7 +147,7 @@ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MSG, \ struct drm_vmw_msg_arg) -/** +/* * The core DRM version of this macro doesn't account for * DRM_COMMAND_BASE. */ @@ -161,7 +155,7 @@ #define VMW_IOCTL_DEF(ioctl, func, flags) \ [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func} -/** +/* * Ioctl definitions. */ @@ -526,7 +520,7 @@ static void vmw_release_device_late(struct vmw_private *dev_priv) vmw_fifo_release(dev_priv, &dev_priv->fifo); } -/** +/* * Sets the initial_[width|height] fields on the given vmw_private. * * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then @@ -599,7 +593,7 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv) /** * vmw_dma_masks - set required page- and dma masks * - * @dev: Pointer to struct drm-device + * @dev_priv: Pointer to struct drm-device * * With 32-bit we can only handle 32 bit PFNs. Optionally set that * restriction also for 64-bit systems. 
@@ -712,17 +706,8 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) dev_priv->last_read_seqno = (uint32_t) -100; dev_priv->drm.dev_private = dev_priv; - ret = vmw_setup_pci_resources(dev_priv, pci_id); - if (ret) - return ret; - ret = vmw_detect_version(dev_priv); - if (ret) - goto out_no_pci_or_version; - mutex_init(&dev_priv->cmdbuf_mutex); - mutex_init(&dev_priv->release_mutex); mutex_init(&dev_priv->binding_mutex); - mutex_init(&dev_priv->global_kms_state_mutex); ttm_lock_init(&dev_priv->reservation_sem); spin_lock_init(&dev_priv->resource_lock); spin_lock_init(&dev_priv->hw_lock); @@ -730,6 +715,14 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) spin_lock_init(&dev_priv->cap_lock); spin_lock_init(&dev_priv->cursor_lock); + ret = vmw_setup_pci_resources(dev_priv, pci_id); + if (ret) + return ret; + ret = vmw_detect_version(dev_priv); + if (ret) + goto out_no_pci_or_version; + + for (i = vmw_res_context; i < vmw_res_max; ++i) { idr_init(&dev_priv->res_idr[i]); INIT_LIST_HEAD(&dev_priv->res_lru[i]); @@ -885,12 +878,12 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) drm_vma_offset_manager_init(&dev_priv->vma_manager, DRM_FILE_PAGE_OFFSET_START, DRM_FILE_PAGE_OFFSET_SIZE); - ret = ttm_bo_device_init(&dev_priv->bdev, &vmw_bo_driver, - dev_priv->drm.dev, - dev_priv->drm.anon_inode->i_mapping, - &dev_priv->vma_manager, - dev_priv->map_mode == vmw_dma_alloc_coherent, - false); + ret = ttm_device_init(&dev_priv->bdev, &vmw_bo_driver, + dev_priv->drm.dev, + dev_priv->drm.anon_inode->i_mapping, + &dev_priv->vma_manager, + dev_priv->map_mode == vmw_dma_alloc_coherent, + false); if (unlikely(ret != 0)) { DRM_ERROR("Failed initializing TTM buffer object driver.\n"); goto out_no_bdev; @@ -967,8 +960,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) if (ret) goto out_no_fifo; - DRM_INFO("Atomic: %s\n", (dev_priv->drm.driver->driver_features & DRIVER_ATOMIC) - ? "yes." 
: "no."); if (dev_priv->sm_type == VMW_SM_5) DRM_INFO("SM5 support available.\n"); if (dev_priv->sm_type == VMW_SM_4_1) @@ -976,11 +967,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) if (dev_priv->sm_type == VMW_SM_4) DRM_INFO("SM4 support available.\n"); - snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s", - VMWGFX_REPO, VMWGFX_GIT_VERSION); - vmw_host_log(host_log); - - memset(host_log, 0, sizeof(host_log)); snprintf(host_log, sizeof(host_log), "vmwgfx: Module Version: %d.%d.%d", VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR, VMWGFX_DRIVER_PATCHLEVEL); @@ -1007,7 +993,7 @@ out_no_kms: vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR); vmw_vram_manager_fini(dev_priv); out_no_vram: - (void)ttm_bo_device_release(&dev_priv->bdev); + ttm_device_fini(&dev_priv->bdev); out_no_bdev: vmw_fence_manager_takedown(dev_priv->fman); out_no_fman: @@ -1054,7 +1040,7 @@ static void vmw_driver_unload(struct drm_device *dev) if (dev_priv->has_mob) vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB); vmw_vram_manager_fini(dev_priv); - (void) ttm_bo_device_release(&dev_priv->bdev); + ttm_device_fini(&dev_priv->bdev); drm_vma_offset_manager_destroy(&dev_priv->vma_manager); vmw_release_device_late(dev_priv); vmw_fence_manager_takedown(dev_priv->fman); @@ -1268,6 +1254,7 @@ static void vmw_remove(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); + ttm_mem_global_release(&ttm_mem_glob); drm_dev_unregister(dev); vmw_driver_unload(dev); } @@ -1383,7 +1370,7 @@ static int vmw_pm_freeze(struct device *kdev) vmw_execbuf_release_pinned_bo(dev_priv); vmw_resource_evict_all(dev_priv); vmw_release_device_early(dev_priv); - while (ttm_bo_swapout(&ctx) == 0); + while (ttm_device_swapout(&dev_priv->bdev, &ctx, GFP_KERNEL) > 0); if (dev_priv->enable_fb) vmw_fifo_resource_dec(dev_priv); if (atomic_read(&dev_priv->num_fifo_resources) != 0) { @@ -1516,9 +1503,12 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (IS_ERR(vmw)) return PTR_ERR(vmw); - vmw->drm.pdev = pdev; pci_set_drvdata(pdev, &vmw->drm); + ret = ttm_mem_global_init(&ttm_mem_glob, &pdev->dev); + if (ret) + return ret; + ret = vmw_driver_load(vmw, ent->device); if (ret) return ret; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 5fa5bcd20cc5..c6b1eb5952bc 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -55,10 +55,10 @@ #define VMWGFX_DRIVER_NAME "vmwgfx" -#define VMWGFX_DRIVER_DATE "20200114" +#define VMWGFX_DRIVER_DATE "20210218" #define VMWGFX_DRIVER_MAJOR 2 #define VMWGFX_DRIVER_MINOR 18 -#define VMWGFX_DRIVER_PATCHLEVEL 0 +#define VMWGFX_DRIVER_PATCHLEVEL 1 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) #define VMWGFX_MAX_RELOCATIONS 2048 #define VMWGFX_MAX_VALIDATIONS 2048 @@ -484,7 +484,7 @@ enum vmw_sm_type { struct vmw_private { struct drm_device drm; - struct ttm_bo_device bdev; + struct ttm_device bdev; struct vmw_fifo_state fifo; @@ -529,7 +529,6 @@ struct vmw_private { struct vmw_overlay *overlay_priv; struct drm_property *hotplug_mode_update_property; struct drm_property *implicit_placement_property; - struct mutex global_kms_state_mutex; spinlock_t cursor_lock; struct drm_atomic_state *suspend_state; @@ -592,7 +591,6 @@ struct vmw_private { bool refuse_hibernation; bool suspend_locked; - struct mutex release_mutex; atomic_t num_fifo_resources; /* @@ -775,7 +773,8 @@ extern void vmw_resource_unreserve(struct vmw_resource *res, struct vmw_buffer_object *new_backup, unsigned long new_backup_offset); extern void 
vmw_query_move_notify(struct ttm_buffer_object *bo, - struct ttm_resource *mem); + struct ttm_resource *old_mem, + struct ttm_resource *new_mem); extern int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob); extern void vmw_resource_evict_all(struct vmw_private *dev_priv); extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo); @@ -999,7 +998,7 @@ extern struct ttm_placement vmw_evictable_placement; extern struct ttm_placement vmw_srf_placement; extern struct ttm_placement vmw_mob_placement; extern struct ttm_placement vmw_nonfixed_placement; -extern struct ttm_bo_driver vmw_bo_driver; +extern struct ttm_device_funcs vmw_bo_driver; extern const struct vmw_sg_table * vmw_bo_sg_table(struct ttm_buffer_object *bo); extern int vmw_bo_create_and_populate(struct vmw_private *dev_priv, @@ -1524,9 +1523,8 @@ static inline void vmw_bo_unreference(struct vmw_buffer_object **buf) struct vmw_buffer_object *tmp_buf = *buf; *buf = NULL; - if (tmp_buf != NULL) { + if (tmp_buf != NULL) ttm_bo_put(&tmp_buf->base); - } } static inline struct vmw_buffer_object * diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 462f17320708..7a24196f92c3 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -80,7 +80,8 @@ struct vmw_relocation { * with a NOP. * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after * validation is -1, the command is replaced with a NOP. Otherwise no action. - */ + * @vmw_res_rel_max: Last value in the enum - used for error checking +*/ enum vmw_resource_relocation_type { vmw_res_rel_normal, vmw_res_rel_nop, @@ -122,9 +123,11 @@ struct vmw_ctx_validation_info { /** * struct vmw_cmd_entry - Describe a command for the verifier * + * @func: Call-back to handle the command. * @user_allow: Whether allowed from the execbuf ioctl. * @gb_disable: Whether disabled if guest-backed objects are available. * @gb_enable: Whether enabled iff guest-backed objects are available. + * @cmd_name: Name of the command. */ struct vmw_cmd_entry { int (*func) (struct vmw_private *, struct vmw_sw_context *, @@ -203,6 +206,7 @@ static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context) * * @dev_priv: Pointer to the device private: * @sw_context: The command submission context + * @res: Pointer to the resource * @node: The validation node holding the context resource metadata */ static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv, @@ -509,7 +513,7 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv, /** * vmw_resource_relocation_add - Add a relocation to the relocation list * - * @list: Pointer to head of relocation list. + * @sw_context: Pointer to the software context. * @res: The resource. * @offset: Offset into the command buffer currently being parsed where the id * that needs fixup is located. Granularity is one byte. @@ -639,7 +643,7 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context) * @converter: User-space visisble type specific information. * @id_loc: Pointer to the location in the command buffer currently being parsed * from where the user-space resource id handle is located. - * @p_val: Pointer to pointer to resource validalidation node. Populated on + * @p_res: Pointer to pointer to resource validation node. Populated on * exit. */ static int @@ -1700,7 +1704,7 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv, * * @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch. - * @val_node: The validation node representing the resource. + * @res: Pointer to the resource. * @buf_id: Pointer to the user-space backup buffer handle in the command * stream. * @backup_offset: Offset of backup into MOB. @@ -3739,7 +3743,7 @@ static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context, return 0; } -/** +/* * vmw_execbuf_fence_commands - create and submit a command stream fence * * Creates a fence object and submits a command stream marker. @@ -3939,9 +3943,9 @@ static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv, * On successful return, the function returns a pointer to the data in the * command buffer and *@header is set to non-NULL. * - * If command buffers could not be used, the function will return the value of - * @kernel_commands on function call. That value may be NULL. In that case, the - * value of *@header will be set to NULL. + * @kernel_commands: If command buffers could not be used, the function will + * return the value of @kernel_commands on function call. That value may be + * NULL. In that case, the value of *@header will be set to NULL. * * If an error is encountered, the function will return a pointer error value. * If the function is interrupted by a signal while sleeping, it will return diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index 378ec7600154..23523eb3cac2 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c @@ -58,13 +58,11 @@ struct vmw_user_fence { /** * struct vmw_event_fence_action - fence action that delivers a drm event. * - * @e: A struct drm_pending_event that controls the event delivery. * @action: A struct vmw_fence_action to hook up to a fence. + * @event: A pointer to the pending event. * @fence: A referenced pointer to the fence to keep it alive while @action * hangs on it. * @dev: Pointer to a struct drm_device so we can access the event stuff. - * @kref: Both @e and @action has destructors, so we need to refcount. - * @size: Size accounted for this object. * @tv_sec: If non-null, the variable pointed to will be assigned * current time tv_sec val when the fence signals. * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will @@ -87,7 +85,7 @@ fman_from_fence(struct vmw_fence_obj *fence) return container_of(fence->base.lock, struct vmw_fence_manager, lock); } -/** +/* * Note on fencing subsystem usage of irqs: * Typically the vmw_fences_update function is called * @@ -250,7 +248,7 @@ static const struct dma_fence_ops vmw_fence_ops = { }; -/** +/* * Execute signal actions on fences recently signaled. * This is done from a workqueue so we don't have to execute * signal actions from atomic context. @@ -708,7 +706,7 @@ int vmw_wait_dma_fence(struct vmw_fence_manager *fman, } -/** +/* * vmw_fence_fifo_down - signal all unsignaled fence objects. */ @@ -948,8 +946,8 @@ static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action) /** * vmw_fence_obj_add_action - Add an action to a fence object. * - * @fence - The fence object. - * @action - The action to add. + * @fence: The fence object. + * @action: The action to add. * * Note that the action callbacks may be executed before this function * returns. @@ -1001,6 +999,10 @@ static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence, * @fence: The fence object on which to post the event. * @event: Event to be posted. 
This event should've been alloced * using k[mz]alloc, and should've been completely initialized. + * @tv_sec: If non-null, the variable pointed to will be assigned + * current time tv_sec val when the fence signals. + * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will + * be assigned the current time tv_usec val when the fence signals. * @interruptible: Interruptible waits if possible. * * As a side effect, the object pointed to by @event may have been diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index 80af8772b8c2..b36032964b2f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c @@ -437,7 +437,7 @@ __poll_t vmw_fops_poll(struct file *filp, struct poll_table_struct *wait) * @filp: See the linux fops read documentation. * @buffer: See the linux fops read documentation. * @count: See the linux fops read documentation. - * offset: See the linux fops read documentation. + * @offset: See the linux fops read documentation. * * Wrapper around the drm_read function that makes sure the device is * processing the fifo if drm_read decides to wait. diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 9a89f658e501..abbca8b0b3c5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -370,12 +370,16 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane, void vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { - struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc; + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); + struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc; struct vmw_private *dev_priv = vmw_priv(crtc->dev); struct vmw_display_unit *du = vmw_crtc_to_du(crtc); - struct vmw_plane_state *vps = vmw_plane_state_to_vps(plane->state); + struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state); s32 hotspot_x, hotspot_y; int ret = 0; @@ -383,9 +387,9 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, hotspot_x = du->hotspot_x; hotspot_y = du->hotspot_y; - if (plane->state->fb) { - hotspot_x += plane->state->fb->hot_x; - hotspot_y += plane->state->fb->hot_y; + if (new_state->fb) { + hotspot_x += new_state->fb->hot_x; + hotspot_y += new_state->fb->hot_y; } du->cursor_surface = vps->surf; @@ -400,8 +404,8 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, hotspot_y); } else if (vps->bo) { ret = vmw_cursor_update_bo(dev_priv, vps->bo, - plane->state->crtc_w, - plane->state->crtc_h, + new_state->crtc_w, + new_state->crtc_h, hotspot_x, hotspot_y); } else { vmw_cursor_update_position(dev_priv, false, 0, 0); @@ -409,8 +413,8 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, } if (!ret) { - du->cursor_x = plane->state->crtc_x + du->set_gui_x; - du->cursor_y = plane->state->crtc_y + du->set_gui_y; + du->cursor_x = new_state->crtc_x + du->set_gui_x; + du->cursor_y = new_state->crtc_y + du->set_gui_y; vmw_cursor_update_position(dev_priv, true, du->cursor_x + hotspot_x, @@ -437,26 +441,28 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, * Returns 0 on success */ int vmw_du_primary_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_atomic_state *state) { + struct drm_plane_state *new_state = 
drm_atomic_get_new_plane_state(state, + plane); struct drm_crtc_state *crtc_state = NULL; - struct drm_framebuffer *new_fb = state->fb; + struct drm_framebuffer *new_fb = new_state->fb; int ret; - if (state->crtc) - crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc); + if (new_state->crtc) + crtc_state = drm_atomic_get_new_crtc_state(state, + new_state->crtc); - ret = drm_atomic_helper_check_plane_state(state, crtc_state, + ret = drm_atomic_helper_check_plane_state(new_state, crtc_state, DRM_PLANE_HELPER_NO_SCALING, DRM_PLANE_HELPER_NO_SCALING, false, true); if (!ret && new_fb) { - struct drm_crtc *crtc = state->crtc; - struct vmw_connector_state *vcs; + struct drm_crtc *crtc = new_state->crtc; struct vmw_display_unit *du = vmw_crtc_to_du(crtc); - vcs = vmw_connector_state_to_vcs(du->connector.state); + vmw_connector_state_to_vcs(du->connector.state); } @@ -468,7 +474,7 @@ int vmw_du_primary_plane_atomic_check(struct drm_plane *plane, * vmw_du_cursor_plane_atomic_check - check if the new state is okay * * @plane: cursor plane - * @state: info on the new plane state + * @new_state: info on the new plane state * * This is a chance to fail if the new cursor state does not fit * our requirements. @@ -476,8 +482,10 @@ int vmw_du_primary_plane_atomic_check(struct drm_plane *plane, * Returns 0 on success */ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *new_state) + struct drm_atomic_state *state) { + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); int ret = 0; struct drm_crtc_state *crtc_state = NULL; struct vmw_surface *surface = NULL; @@ -891,7 +899,6 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, struct vmw_framebuffer_surface *vfbs; enum SVGA3dSurfaceFormat format; int ret; - struct drm_format_name_buf format_name; /* 3D is only supported on HWv8 and newer hosts */ if (dev_priv->active_display_unit == vmw_du_legacy) @@ -929,8 +936,8 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, format = SVGA3D_A1R5G5B5; break; default: - DRM_ERROR("Invalid pixel format: %s\n", - drm_get_format_name(mode_cmd->pixel_format, &format_name)); + DRM_ERROR("Invalid pixel format: %p4cc\n", + &mode_cmd->pixel_format); return -EINVAL; } @@ -1058,7 +1065,7 @@ static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = { .dirty = vmw_framebuffer_bo_dirty_ext, }; -/** +/* * Pin the bofer in a location suitable for access by the * display system. 
@@ -1145,7 +1152,6 @@ static int vmw_create_bo_proxy(struct drm_device *dev, uint32_t format; struct vmw_resource *res; unsigned int bytes_pp; - struct drm_format_name_buf format_name; int ret; switch (mode_cmd->pixel_format) { @@ -1167,8 +1173,8 @@ static int vmw_create_bo_proxy(struct drm_device *dev, break; default: - DRM_ERROR("Invalid framebuffer format %s\n", - drm_get_format_name(mode_cmd->pixel_format, &format_name)); + DRM_ERROR("Invalid framebuffer format %p4cc\n", + &mode_cmd->pixel_format); return -EINVAL; } @@ -1212,7 +1218,6 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv, struct drm_device *dev = &dev_priv->drm; struct vmw_framebuffer_bo *vfbd; unsigned int requested_size; - struct drm_format_name_buf format_name; int ret; requested_size = mode_cmd->height * mode_cmd->pitches[0]; @@ -1232,8 +1237,8 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv, case DRM_FORMAT_RGB565: break; default: - DRM_ERROR("Invalid pixel format: %s\n", - drm_get_format_name(mode_cmd->pixel_format, &format_name)); + DRM_ERROR("Invalid pixel format: %p4cc\n", + &mode_cmd->pixel_format); return -EINVAL; } } @@ -1268,6 +1273,7 @@ out_err1: /** * vmw_kms_srf_ok - check if a surface can be created * + * @dev_priv: Pointer to device private struct. * @width: requested width * @height: requested height * @@ -1779,10 +1785,6 @@ vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv) drm_property_create_range(&dev_priv->drm, DRM_MODE_PROP_IMMUTABLE, "hotplug_mode_update", 0, 1); - - if (!dev_priv->hotplug_mode_update_property) - return; - } int vmw_kms_init(struct vmw_private *dev_priv) @@ -1897,7 +1899,7 @@ bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv, } -/** +/* * Function called by DRM code with vbl_lock held. */ u32 vmw_get_vblank_counter(struct drm_crtc *crtc) @@ -1905,7 +1907,7 @@ u32 vmw_get_vblank_counter(struct drm_crtc *crtc) return 0; } -/** +/* * Function called by DRM code with vbl_lock held. */ int vmw_enable_vblank(struct drm_crtc *crtc) @@ -1913,7 +1915,7 @@ int vmw_enable_vblank(struct drm_crtc *crtc) return -EINVAL; } -/** +/* * Function called by DRM code with vbl_lock held. */ void vmw_disable_vblank(struct drm_crtc *crtc)
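The drm_get_format_name() removals above rely on the kernel's %p4cc vsprintf extension, which formats a FOURCC pixel-format code directly from a pointer to the 32-bit value, making the on-stack struct drm_format_name_buf unnecessary. A small illustration, assuming any printk-family caller:

        u32 fmt = DRM_FORMAT_XRGB8888;

        /* Prints something like "XR24 little-endian (0x34325258)".
         * Note that %p4cc takes the address of the value, not the value. */
        pr_err("unsupported format %p4cc\n", &fmt);

Passing a pointer keeps %p4cc consistent with the other %p extensions and avoids integer promotion of the argument through varargs.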
@@ -2057,6 +2059,10 @@ static struct drm_display_mode vmw_kms_connector_builtin[] = { { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216, 1344, 1600, 0, 864, 865, 868, 900, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1280x720@60Hz */ + { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74500, 1280, 1344, + 1472, 1664, 0, 720, 723, 728, 748, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x768@60Hz */ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344, 1472, 1664, 0, 768, 771, 778, 798, 0, @@ -2101,6 +2107,10 @@ static struct drm_display_mode vmw_kms_connector_builtin[] = { { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952, 2176, 2528, 0, 1392, 1393, 1396, 1439, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 1920x1080@60Hz */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 173000, 1920, 2048, + 2248, 2576, 0, 1080, 1083, 1088, 1120, 0, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1920x1200@60Hz */ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056, 2256, 2592, 0, 1200, 1203, 1209, 1245, 0, @@ -2109,10 +2119,26 @@ static struct drm_display_mode vmw_kms_connector_builtin[] = { { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048, 2256, 2600, 0, 1440, 1441, 1444, 1500, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 2560x1440@60Hz */ + { DRM_MODE("2560x1440", DRM_MODE_TYPE_DRIVER, 241500, 2560, 2608, + 2640, 2720, 0, 1440, 1443, 1448, 1481, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 2560x1600@60Hz */ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752, 3032, 3504, 0, 1600, 1603, 1609, 1658, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, + /* 2880x1800@60Hz */ + { DRM_MODE("2880x1800", DRM_MODE_TYPE_DRIVER, 337500, 2880, 2928, + 2960, 3040, 0, 1800, 1803, 1809, 1852, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 3840x2160@60Hz */ + { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 533000, 3840, 3888, + 3920, 4000, 0, 2160, 2163, 2168, 2222, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, + /* 3840x2400@60Hz */ + { DRM_MODE("3840x2400", DRM_MODE_TYPE_DRIVER, 592250, 3840, 3888, + 3920, 4000, 0, 2400, 2403, 2409, 2469, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* Terminate */ { DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) }, }; @@ -2121,7 +2147,7 @@ static struct drm_display_mode vmw_kms_connector_builtin[] = { * vmw_guess_mode_timing - Provide fake timings for a * 60Hz vrefresh mode. * - * @mode - Pointer to a struct drm_display_mode with hdisplay and vdisplay + * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay * members filled in. */ void vmw_guess_mode_timing(struct drm_display_mode *mode)
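Each DRM_MODE() initializer above lists, in order: pixel clock in kHz, then hdisplay/hsync_start/hsync_end/htotal/hskew, then vdisplay/vsync_start/vsync_end/vtotal/vscan, followed by the sync flags. No refresh rate is stored; it follows from clock / (htotal * vtotal). A quick sanity check of the new 1920x1080 entry, written out as a sketch:

        /* values taken from the 1920x1080 hunk above */
        u32 clock_khz = 173000, htotal = 2576, vtotal = 1120;
        u32 refresh_hz = clock_khz * 1000u / (htotal * vtotal);
        /* 173000000 / 2885120 = 59 (59.96 before truncation), i.e. ~60 Hz */

The other added entries check out the same way, which is why the table can carry an @60Hz comment without a dedicated refresh field.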
@@ -2176,6 +2202,7 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector, mode->hdisplay = du->pref_width; mode->vdisplay = du->pref_height; vmw_guess_mode_timing(mode); + drm_mode_set_name(mode); if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * assumed_bpp, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index 03f3694015ce..bbc809f7bd8a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h @@ -245,7 +245,7 @@ struct vmw_framebuffer_bo { }; -static const uint32_t vmw_primary_plane_formats[] = { +static const uint32_t __maybe_unused vmw_primary_plane_formats[] = { DRM_FORMAT_XRGB1555, DRM_FORMAT_RGB565, DRM_FORMAT_RGB888, @@ -253,7 +253,7 @@ struct vmw_framebuffer_bo { DRM_FORMAT_ARGB8888, }; -static const uint32_t vmw_cursor_plane_formats[] = { +static const uint32_t __maybe_unused vmw_cursor_plane_formats[] = { DRM_FORMAT_ARGB8888, }; @@ -456,11 +456,11 @@ void vmw_du_cursor_plane_destroy(struct drm_plane *plane); /* Atomic Helpers */ int vmw_du_primary_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state); + struct drm_atomic_state *state); int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state); + struct drm_atomic_state *state); void vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state); + struct drm_atomic_state *state); int vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane, struct drm_plane_state *new_state); void vmw_du_plane_cleanup_fb(struct drm_plane *plane, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index 9a9508edbc9e..87e0b303d900 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c @@ -49,7 +49,7 @@ struct vmw_legacy_display { struct vmw_framebuffer *fb; }; -/** +/* * Display unit using the legacy register interface. */ struct vmw_legacy_display_unit { @@ -206,6 +206,7 @@ static void vmw_ldu_crtc_mode_set_nofb(struct drm_crtc *crtc) * vmw_ldu_crtc_atomic_enable - Noop * * @crtc: CRTC associated with the new screen + * @state: Unused * * This is called after a mode set has been completed. Here's * usually a good place to call vmw_ldu_add_active/vmw_ldu_del_active @@ -221,6 +222,7 @@ static void vmw_ldu_crtc_atomic_enable(struct drm_crtc *crtc, * vmw_ldu_crtc_atomic_disable - Turns off CRTC * * @crtc: CRTC to be turned off + * @state: Unused */ static void vmw_ldu_crtc_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state) @@ -282,18 +284,22 @@ drm_connector_helper_funcs vmw_ldu_connector_helper_funcs = { static void vmw_ldu_primary_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); struct vmw_private *dev_priv; struct vmw_legacy_display_unit *ldu; struct vmw_framebuffer *vfb; struct drm_framebuffer *fb; - struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc; + struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc; ldu = vmw_crtc_to_ldu(crtc); dev_priv = vmw_priv(plane->dev); - fb = plane->state->fb; + fb = new_state->fb; vfb = (fb) ? vmw_framebuffer_to_vfb(fb) : NULL;
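The __maybe_unused annotations above address a compiler warning rather than behavior: vmw_primary_plane_formats and vmw_cursor_plane_formats are static const arrays defined in a header, so every .c file including vmwgfx_kms.h gets a private copy, and an includer that never references them can trip -Wunused-const-variable. A minimal illustration of the idiom, with example_formats as a made-up array:

        /* in a header included by several translation units */
        static const uint32_t __maybe_unused example_formats[] = {
                DRM_FORMAT_ARGB8888,    /* not every includer reads this */
        };

The heavier alternative would be to define the array once in a .c file and expose an extern declaration; the attribute is the smaller change.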
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c index a372980fe6a5..5648664f71bc 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c @@ -94,6 +94,16 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob, struct vmw_piter data_iter, unsigned long num_data_pages); + +static inline void vmw_bo_unpin_unlocked(struct ttm_buffer_object *bo) +{ + int ret = ttm_bo_reserve(bo, false, true, NULL); + BUG_ON(ret != 0); + ttm_bo_unpin(bo); + ttm_bo_unreserve(bo); +} + + /* * vmw_setup_otable_base - Issue an object table base setup command to * the device @@ -277,6 +287,7 @@ out_no_setup: &batch->otables[i]); } + vmw_bo_unpin_unlocked(batch->otable_bo); ttm_bo_put(batch->otable_bo); batch->otable_bo = NULL; return ret; @@ -340,8 +351,10 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv, BUG_ON(ret != 0); vmw_bo_fence_single(bo, NULL); + ttm_bo_unpin(bo); ttm_bo_unreserve(bo); + ttm_bo_unpin(batch->otable_bo); ttm_bo_put(batch->otable_bo); batch->otable_bo = NULL; } @@ -528,6 +541,7 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob, void vmw_mob_destroy(struct vmw_mob *mob) { if (mob->pt_bo) { + vmw_bo_unpin_unlocked(mob->pt_bo); ttm_bo_put(mob->pt_bo); mob->pt_bo = NULL; } @@ -643,6 +657,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv, out_no_cmd_space: vmw_fifo_resource_dec(dev_priv); if (pt_set_up) { + vmw_bo_unpin_unlocked(mob->pt_bo); ttm_bo_put(mob->pt_bo); mob->pt_bo = NULL; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c index 15b5bde69324..609269625468 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c @@ -253,7 +253,7 @@ static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply, * vmw_send_msg: Sends a message to the host * * @channel: RPC channel - * @logmsg: NULL terminated string + * @msg: NULL terminated string * * Returns: 0 on success */ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c index d6d282c13b7f..ac4a9b722279 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c @@ -42,7 +42,7 @@ struct vmw_stream { struct drm_vmw_control_stream_arg saved; }; -/** +/* * Overlay control */ struct vmw_overlay { @@ -85,7 +85,7 @@ static inline void fill_flush(struct vmw_escape_video_flush *cmd, cmd->flush.streamId = stream_id; } -/** +/* * Send put command to hw. * * Returns @@ -174,7 +174,7 @@ static int vmw_overlay_send_put(struct vmw_private *dev_priv, return 0; } -/** +/* * Send stop command to hw. * * Returns @@ -216,7 +216,7 @@ static int vmw_overlay_send_stop(struct vmw_private *dev_priv, return 0; } -/** +/* * Move a buffer to vram or gmr if @pin is set, else unpin the buffer. * * With the introduction of screen objects buffers could now be @@ -235,7 +235,7 @@ static int vmw_overlay_move_buffer(struct vmw_private *dev_priv, return vmw_bo_pin_in_vram_or_gmr(dev_priv, buf, inter); } -/** +/* * Stop or pause a stream. * * If the stream is paused the no evict flag is removed from the buffer @@ -285,7 +285,7 @@ static int vmw_overlay_stop(struct vmw_private *dev_priv, return 0; } -/** +/* * Update a stream and send any put or stop fifo commands needed. * * The caller must hold the overlay lock. @@ -353,7 +353,7 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv, return 0; } -/** +/* * Try to resume all paused streams. * * Used by the kms code after moving a new scanout buffer to vram.
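The new vmw_bo_unpin_unlocked() helper in the vmwgfx_mob.c hunks above exists because ttm_bo_unpin() must be called with the buffer's reservation held, so every teardown and error path that releases a pinned page-table BO has to bracket the unpin. A sketch of the balanced lifecycle the patch establishes, assuming the BO was pinned during setup:

        int ret = ttm_bo_reserve(bo, false, true, NULL); /* uninterruptible, no_wait */

        BUG_ON(ret != 0);       /* a driver-private BO cannot be contended here */
        ttm_bo_unpin(bo);       /* drop the pin taken at setup time */
        ttm_bo_unreserve(bo);
        ttm_bo_put(bo);         /* then drop the last reference */

Without the added unpin calls the pin count would still be nonzero when ttm_bo_put() drops the final reference, which TTM warns about when destroying the object.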
@@ -387,7 +387,7 @@ int vmw_overlay_resume_all(struct vmw_private *dev_priv) return 0; } -/** +/* * Pauses all active streams. * * Used by the kms code when moving a new scanout buffer to vram. diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index d1e7b9608145..35f02958ee2c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -202,7 +202,6 @@ int vmw_resource_alloc_id(struct vmw_resource *res) * * @dev_priv: Pointer to a device private struct. * @res: The struct vmw_resource to initialize. - * @obj_type: Resource object type. * @delay_id: Boolean whether to defer device id allocation until * the first validation. * @res_free: Resource destructor. @@ -288,8 +287,6 @@ out_bad_resource: * @tfile: Pointer to a struct ttm_object_file identifying the caller * @handle: The TTM user-space handle * @converter: Pointer to an object describing the resource type - * @p_res: On successful return the location pointed to will contain - * a pointer to a refcounted struct vmw_resource. * * If the handle can't be found or is associated with an incorrect resource * type, -EINVAL will be returned. @@ -315,7 +312,7 @@ vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv, return converter->base_obj_to_res(base); } -/** +/* * Helper function that looks up either a surface or a bo. * * The pointers pointed to by out_surf and out_buf need to be NULL. @@ -388,6 +385,7 @@ out_no_bo: * @res: The resource to make visible to the device. * @val_buf: Information about a buffer possibly * containing backup data if a bind operation is needed. + * @dirtying: Transfer dirty regions. * * On hardware resource shortage, this function returns -EBUSY and * should be retried once resources have been freed up. @@ -586,7 +584,7 @@ out_no_reserve: return ret; } -/** +/* * vmw_resource_reserve - Reserve a resource for command submission * * @res: The resource to reserve. @@ -849,16 +847,18 @@ int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob) * vmw_query_move_notify - Read back cached query states * * @bo: The TTM buffer object about to move. - * @mem: The memory region @bo is moving to. + * @old_mem: The memory region @bo is moving from. + * @new_mem: The memory region @bo is moving to. * * Called before the query MOB is swapped out to read back cached query * states from the device.
*/ void vmw_query_move_notify(struct ttm_buffer_object *bo, - struct ttm_resource *mem) + struct ttm_resource *old_mem, + struct ttm_resource *new_mem) { struct vmw_buffer_object *dx_query_mob; - struct ttm_bo_device *bdev = bo->bdev; + struct ttm_device *bdev = bo->bdev; struct vmw_private *dev_priv; @@ -873,7 +873,8 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo, } /* If BO is being moved from MOB to system memory */ - if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) { + if (new_mem->mem_type == TTM_PL_SYSTEM && + old_mem->mem_type == VMW_PL_MOB) { struct vmw_fence_obj *fence; (void) vmw_query_readback_all(dx_query_mob); @@ -973,7 +974,7 @@ void vmw_resource_evict_all(struct vmw_private *dev_priv) mutex_unlock(&dev_priv->cmdbuf_mutex); } -/** +/* * vmw_resource_pin - Add a pin reference on a resource * * @res: The resource to add a pin reference on diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index b0db059b8cfb..9bc9a0714664 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c @@ -84,7 +84,7 @@ struct vmw_kms_sou_define_gmrfb { SVGAFifoCmdDefineGMRFB body; }; -/** +/* * Display unit using screen objects. */ struct vmw_screen_object_unit { @@ -112,7 +112,7 @@ static void vmw_sou_crtc_destroy(struct drm_crtc *crtc) vmw_sou_destroy(vmw_crtc_to_sou(crtc)); } -/** +/* * Send the fifo command to create a screen. */ static int vmw_sou_fifo_create(struct vmw_private *dev_priv, @@ -160,7 +160,7 @@ static int vmw_sou_fifo_create(struct vmw_private *dev_priv, return 0; } -/** +/* * Send the fifo command to destroy a screen. */ static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv, @@ -263,7 +263,7 @@ static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc) /** * vmw_sou_crtc_helper_prepare - Noop * - * @crtc: CRTC associated with the new screen + * @crtc: CRTC associated with the new screen * * Prepares the CRTC for a mode set, but we don't need to do anything here. */ @@ -275,6 +275,7 @@ static void vmw_sou_crtc_helper_prepare(struct drm_crtc *crtc) * vmw_sou_crtc_atomic_enable - Noop * * @crtc: CRTC associated with the new screen + * @state: Unused * * This is called after a mode set has been completed. */
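It is worth noting what the vmw_query_move_notify() change further up buys: with only the destination @mem available, the previous code had to infer the old placement from bo->mem, whose contents depend on how far TTM has progressed through the move. Passing both regions makes the readback condition explicit and ordering-independent. Restated as a sketch, using the names from that hunk:

        /* Read back device-held query results only when the MOB is
         * leaving device-visible memory for plain system pages. */
        if (new_mem->mem_type == TTM_PL_SYSTEM &&
            old_mem->mem_type == VMW_PL_MOB) {
                (void) vmw_query_readback_all(dx_query_mob);
                /* the function then fences the BO so the readback
                 * completes before the pages can be reused */
        }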
@@ -287,6 +288,7 @@ static void vmw_sou_crtc_atomic_enable(struct drm_crtc *crtc, * vmw_sou_crtc_atomic_disable - Turns off CRTC * * @crtc: CRTC to be turned off + * @state: Unused */ static void vmw_sou_crtc_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state) @@ -728,18 +730,20 @@ static int vmw_sou_plane_update_surface(struct vmw_private *dev_priv, static void vmw_sou_primary_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { - struct drm_crtc *crtc = plane->state->crtc; + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane); + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane); + struct drm_crtc *crtc = new_state->crtc; struct drm_pending_vblank_event *event = NULL; struct vmw_fence_obj *fence = NULL; int ret; /* In case of device error, maintain consistent atomic state */ - if (crtc && plane->state->fb) { + if (crtc && new_state->fb) { struct vmw_private *dev_priv = vmw_priv(crtc->dev); struct vmw_framebuffer *vfb = - vmw_framebuffer_to_vfb(plane->state->fb); + vmw_framebuffer_to_vfb(new_state->fb); if (vfb->bo) ret = vmw_sou_plane_update_bo(dev_priv, plane, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c index 905ae50aaa2a..a0db06564013 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c @@ -125,7 +125,7 @@ static const struct vmw_res_func vmw_dx_shader_func = { .commit_notify = vmw_dx_shader_commit_notify, }; -/** +/* * Shader management: */ @@ -654,7 +654,7 @@ out_resource_init: -/** +/* * User-space shader management: */ @@ -686,7 +686,7 @@ static void vmw_shader_free(struct vmw_resource *res) vmw_shader_size); } -/** +/* * This function is called when user space has no more references on the * base object. It releases the base-object's reference on the resource object. */ @@ -945,13 +945,13 @@ int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man, * vmw_compat_shader_add - Create a compat shader and stage it for addition * as a command buffer managed resource. * + * @dev_priv: Pointer to device private structure. * @man: Pointer to the compat shader manager identifying the shader namespace. * @user_key: The key that is used to identify the shader. The key is * unique to the shader type. * @bytecode: Pointer to the bytecode of the shader. * @shader_type: Shader type. - * @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is - * to be created with. + * @size: Command size. * @list: Caller's list of staged command buffer resource actions. * */ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c index 7369dd86d3a9..2877c7b43bd7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c @@ -42,6 +42,7 @@ /** * struct vmw_view - view metadata * + * @rcu: RCU callback head * @res: The struct vmw_resource we derive from * @ctx: Non-refcounted pointer to the context this view belongs to. * @srf: Refcounted pointer to the surface pointed to by this view. diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index fbe977881364..7b11f0285786 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -61,6 +61,7 @@ enum stdu_content_type { * @bottom: Bottom side of bounding box.
* @fb_left: Left side of the framebuffer/content bounding box * @fb_top: Top of the framebuffer/content bounding box + * @pitch: framebuffer pitch (stride) * @buf: buffer object when DMA-ing between buffer and screen targets. * @sid: Surface ID when copying between surface and screen targets. */ @@ -109,8 +110,11 @@ struct vmw_stdu_update_gb_image { * content_vfbs dimensions, then this is a pointer into the * corresponding field in content_vfbs. If not, then this * is a separate buffer to which content_vfbs will blit. - * @content_type: content_fb type - * @defined: true if the current display unit has been initialized + * @content_fb_type: content_fb type + * @display_width: display width + * @display_height: display height + * @defined: true if the current display unit has been initialized + * @cpp: Bytes per pixel */ struct vmw_screen_target_display_unit { struct vmw_display_unit base; @@ -652,6 +656,7 @@ out_cleanup: * @file_priv: Pointer to a struct drm-file identifying the caller. May be * set to NULL, but then @user_fence_rep must also be set to NULL. * @vfb: Pointer to the buffer-object backed framebuffer. + * @user_fence_rep: User-space provided structure for fence information. * @clips: Array of clip rects. Either @clips or @vclips must be NULL. * @vclips: Alternate array of clip rects. Either @clips or @vclips must * be NULL. @@ -1575,10 +1580,12 @@ static int vmw_stdu_plane_update_surface(struct vmw_private *dev_priv, */ static void vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_atomic_state *state) { - struct vmw_plane_state *vps = vmw_plane_state_to_vps(plane->state); - struct drm_crtc *crtc = plane->state->crtc; + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane); + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane); + struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state); + struct drm_crtc *crtc = new_state->crtc; struct vmw_screen_target_display_unit *stdu; struct drm_pending_vblank_event *event; struct vmw_fence_obj *fence = NULL; @@ -1586,9 +1593,9 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane, int ret; /* In case of device error, maintain consistent atomic state */ - if (crtc && plane->state->fb) { + if (crtc && new_state->fb) { struct vmw_framebuffer *vfb = - vmw_framebuffer_to_vfb(plane->state->fb); + vmw_framebuffer_to_vfb(new_state->fb); stdu = vmw_crtc_to_stdu(crtc); dev_priv = vmw_priv(crtc->dev); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index f6cab77075a0..c3e55c1376eb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -41,10 +41,12 @@ /** * struct vmw_user_surface - User-space visible surface resource * + * @prime: The TTM prime object. * @base: The TTM base object handling user-space visibility. * @srf: The surface metadata. * @size: TTM accounting size for the surface. - * @master: master of the creating client. Used for security check. + * @master: Master of the creating client. Used for security check. + * @backup_base: The TTM base object of the backup buffer. */ struct vmw_user_surface { struct ttm_prime_object prime; @@ -69,7 +71,7 @@ struct vmw_surface_offset { }; /** - * vmw_surface_dirty - Surface dirty-tracker + * struct vmw_surface_dirty - Surface dirty-tracker * @cache: Cached layout information of the surface. * @size: Accounting size for the struct vmw_surface_dirty. * @num_subres: Number of subresources.
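A large share of this patch is kernel-doc hygiene of the kind visible above: comments opening with /** are parsed by scripts/kernel-doc, and a W=1 build warns when a documented struct has members without an @name: line (hence the added @pitch, @prime, @backup_base and friends) or when a block that is not valid kernel-doc starts with /** (hence the many demotions to plain /*). A minimal conforming comment, using a made-up struct for illustration:

        /**
         * struct example_unit - One-line summary of the structure.
         *
         * @base: Embedded base display unit.
         * @defined: True once the unit has been initialized.
         */
        struct example_unit {
                struct vmw_display_unit base;
                bool defined;
        };

Every member must be listed, and the summary line must name the object being documented, which is why the vmw_surface_dirty hunk adds the missing struct keyword to its summary.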
@@ -162,7 +164,7 @@ static const struct vmw_res_func vmw_gb_surface_func = { .clean = vmw_surface_clean, }; -/** +/* * struct vmw_surface_dma - SVGA3D DMA command */ struct vmw_surface_dma { @@ -172,7 +174,7 @@ struct vmw_surface_dma { SVGA3dCmdSurfaceDMASuffix suffix; }; -/** +/* * struct vmw_surface_define - SVGA3D Surface Define command */ struct vmw_surface_define { @@ -180,7 +182,7 @@ struct vmw_surface_define { SVGA3dCmdDefineSurface body; }; -/** +/* * struct vmw_surface_destroy - SVGA3D Surface Destroy command */ struct vmw_surface_destroy { @@ -544,6 +546,7 @@ static int vmw_legacy_srf_bind(struct vmw_resource *res, * * @res: Pointer to a struct vmw_res embedded in a struct * vmw_surface. + * @readback: Readback - only true if dirty * @val_buf: Pointer to a struct ttm_validate_buffer containing * information about the backup buffer. * @@ -1060,8 +1063,8 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, /** * vmw_surface_define_encode - Encode a surface_define command. * - * @srf: Pointer to a struct vmw_surface object. - * @cmd_space: Pointer to memory area in which the commands should be encoded. + * @res: Pointer to a struct vmw_resource embedded in a struct + * vmw_surface. */ static int vmw_gb_surface_create(struct vmw_resource *res) { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c index e8e79de255cf..eb63cbe64909 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c @@ -11,6 +11,7 @@ /** * struct vmw_thp_manager - Range manager implementing huge page alignment * + * @manager: TTM resource manager. * @mm: The underlying range manager. Protected by @lock. * @lock: Manager lock. */ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c index dbb068830d80..2dc031fe4a90 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c @@ -265,6 +265,7 @@ static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter) * * @viter: Pointer to the iterator to initialize * @vsgt: Pointer to a struct vmw_sg_table to initialize from + * @p_offset: Pointer offset used to update current array position * * Note that we're following the convention of __sg_page_iter_start, so that * the iterator doesn't point to a valid page after initialization; it has @@ -482,7 +483,7 @@ const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo) } -static int vmw_ttm_bind(struct ttm_bo_device *bdev, +static int vmw_ttm_bind(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_resource *bo_mem) { struct vmw_ttm_tt *vmw_be = @@ -526,7 +527,7 @@ static int vmw_ttm_bind(struct ttm_bo_device *bdev, return ret; } -static void vmw_ttm_unbind(struct ttm_bo_device *bdev, +static void vmw_ttm_unbind(struct ttm_device *bdev, struct ttm_tt *ttm) { struct vmw_ttm_tt *vmw_be = @@ -552,7 +553,7 @@ static void vmw_ttm_unbind(struct ttm_bo_device *bdev, } -static void vmw_ttm_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm) +static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm) { struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, dma_ttm); @@ -572,21 +573,42 @@ static void vmw_ttm_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm) } -static int vmw_ttm_populate(struct ttm_bo_device *bdev, +static int vmw_ttm_populate(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) { + unsigned int i; + int ret; + /* TODO: maybe
completely drop this ? */ if (ttm_tt_is_populated(ttm)) return 0; - return ttm_pool_alloc(&bdev->pool, ttm, ctx); + ret = ttm_pool_alloc(&bdev->pool, ttm, ctx); + if (ret) + return ret; + + for (i = 0; i < ttm->num_pages; ++i) { + ret = ttm_mem_global_alloc_page(&ttm_mem_glob, ttm->pages[i], + PAGE_SIZE, ctx); + if (ret) + goto error; + } + return 0; + +error: + while (i--) + ttm_mem_global_free_page(&ttm_mem_glob, ttm->pages[i], + PAGE_SIZE); + ttm_pool_free(&bdev->pool, ttm); + return ret; } -static void vmw_ttm_unpopulate(struct ttm_bo_device *bdev, +static void vmw_ttm_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm) { struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt, dma_ttm); + unsigned int i; if (vmw_tt->mob) { vmw_mob_destroy(vmw_tt->mob); @@ -594,6 +616,11 @@ static void vmw_ttm_unpopulate(struct ttm_bo_device *bdev, } vmw_ttm_unmap_dma(vmw_tt); + + for (i = 0; i < ttm->num_pages; ++i) + ttm_mem_global_free_page(&ttm_mem_glob, ttm->pages[i], + PAGE_SIZE); + ttm_pool_free(&bdev->pool, ttm); } @@ -639,7 +666,7 @@ static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp) return vmw_user_bo_verify_access(bo, tfile); } -static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem) +static int vmw_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem) { struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev); @@ -664,20 +691,19 @@ static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resourc * vmw_move_notify - TTM move_notify_callback * * @bo: The TTM buffer object about to move. - * @mem: The struct ttm_resource indicating to what memory + * @old_mem: The old memory where we move from + * @new_mem: The struct ttm_resource indicating to what memory * region the move is taking place. * * Calls move_notify for all subsystems needing it. * (currently only resources). */ static void vmw_move_notify(struct ttm_buffer_object *bo, - bool evict, - struct ttm_resource *mem) + struct ttm_resource *old_mem, + struct ttm_resource *new_mem) { - if (!mem) - return; - vmw_bo_move_notify(bo, mem); - vmw_query_move_notify(bo, mem); + vmw_bo_move_notify(bo, new_mem); + vmw_query_move_notify(bo, old_mem, new_mem); }
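The populate/unpopulate hunks above charge each allocated page to the global TTM accounting zone and, if a charge fails midway, must release exactly the pages charged so far. The while (i--) unwind used there is worth spelling out, since it is easy to get off by one; a generic sketch with do_step()/undo_step() as hypothetical stand-ins for the charge and release:

        unsigned int i;
        int ret;

        for (i = 0; i < n; ++i) {
                ret = do_step(i);       /* fallible step, e.g. charging page i */
                if (ret)
                        goto error;     /* steps 0 .. i-1 succeeded */
        }
        return 0;

        error:
        while (i--)                     /* visits i-1, i-2, ..., 0, then stops */
                undo_step(i);
        return ret;

Because i still holds the index of the failed step when the error label is reached, the post-decrement loop releases precisely the successful steps and never touches the failed one.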
@@ -708,7 +734,7 @@ static int vmw_move(struct ttm_buffer_object *bo, return ret; } - vmw_move_notify(bo, evict, new_mem); + vmw_move_notify(bo, &bo->mem, new_mem); if (old_man->use_tt && new_man->use_tt) { if (bo->mem.mem_type == TTM_PL_SYSTEM) { @@ -730,19 +756,11 @@ static int vmw_move(struct ttm_buffer_object *bo, } return 0; fail: - swap(*new_mem, bo->mem); - vmw_move_notify(bo, false, new_mem); - swap(*new_mem, bo->mem); + vmw_move_notify(bo, new_mem, &bo->mem); return ret; } -static void -vmw_delete_mem_notify(struct ttm_buffer_object *bo) -{ - vmw_move_notify(bo, false, NULL); -} - -struct ttm_bo_driver vmw_bo_driver = { +struct ttm_device_funcs vmw_bo_driver = { .ttm_tt_create = &vmw_ttm_tt_create, .ttm_tt_populate = &vmw_ttm_populate, .ttm_tt_unpopulate = &vmw_ttm_unpopulate, @@ -751,7 +769,6 @@ struct ttm_bo_driver vmw_bo_driver = { .evict_flags = vmw_evict_flags, .move = vmw_move, .verify_access = vmw_verify_access, - .delete_mem_notify = vmw_delete_mem_notify, .swap_notify = vmw_swap_notify, .io_mem_reserve = &vmw_ttm_io_mem_reserve, }; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c index f2e2bf6d1421..e7570f422400 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c @@ -48,7 +48,6 @@ struct vmw_validation_bo_node { u32 as_mob : 1; u32 cpu_blit : 1; }; - /** * struct vmw_validation_res_node - Resource validation metadata. * @head: List head for the resource validation list. @@ -64,6 +63,8 @@ struct vmw_validation_bo_node { * @first_usage: True iff the resource has been seen only once in the current * validation batch. * @reserved: Whether the resource is currently reserved by this process. + * @dirty_set: Change dirty status of the resource. + * @dirty: Dirty information VMW_RES_DIRTY_XX. * @private: Optionally additional memory for caller-private data. * * Bit fields are used since these structures are allocated and freed in @@ -205,7 +206,7 @@ vmw_validation_find_bo_dup(struct vmw_validation_context *ctx, * vmw_validation_find_res_dup - Find a duplicate resource entry in the * validation context's lists. * @ctx: The validation context to search. - * @vbo: The buffer object to search for. + * @res: Reference counted resource pointer. * * Return: Pointer to the struct vmw_validation_res_node referencing the * duplicate, or NULL if none found.
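One payoff of the explicit old/new pair shows in the vmw_move() fail path above: the previous code had to swap() bo->mem with *new_mem around the notify call just to describe the reverse move, while the new code simply passes the two regions in the opposite order. A simplified sketch of the contract, with example_* as hypothetical driver functions:

        static int example_move(struct ttm_buffer_object *bo, bool evict,
                                struct ttm_operation_ctx *ctx,
                                struct ttm_resource *new_mem)
        {
                int ret;

                /* tell interested subsystems about the move up front */
                example_move_notify(bo, &bo->mem /* old */, new_mem);
                ret = example_copy_data(bo, ctx, new_mem);
                if (ret)        /* undo: the same call, old and new reversed */
                        example_move_notify(bo, new_mem, &bo->mem);
                return ret;
        }

The same symmetry removes the need for a separate delete_mem_notify hook: the old wrapper forwarded a NULL region, which vmw_move_notify() filtered out with its if (!mem) early return, and both disappear together in this patch.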