author     Jesse Barnes <jbarnes@virtuousgeek.org>    2008-11-05 19:31:53 +0100
committer  Dave Airlie <airlied@linux.ie>             2008-12-29 08:47:22 +0100
commit     a2c0a97b784f837300f7b0869c82ab712c600952 (patch)
tree       aca1cdf3d32e1cfa7387350483f6a70c74a24ffd
parent     drm/i915: Add /proc debugging entry for reading out the HWS. (diff)
drm: GEM mmap support
Add core support for mapping of GEM objects. Drivers should provide a
vm_operations_struct if they want to support page faulting of objects.
The code for handling GEM object offsets was taken from TTM, which was
written by Thomas Hellström.
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
Signed-off-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Dave Airlie <airlied@redhat.com>
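The driver-facing contract introduced here is small: set DRIVER_GEM, point the new drm_driver.gem_vm_ops field at a vm_operations_struct whose fault handler resolves object pages, and route the file_operations mmap hook through drm_gem_mmap(). The sketch below illustrates that wiring only; it is not part of the patch, and all example_* identifiers (including the empty fault handler) are hypothetical placeholders rather than code from any real driver.

/*
 * Illustrative sketch, not part of this patch: how a GEM-enabled driver
 * of this era might wire up mmap support against the interfaces added
 * here.  All example_* identifiers are hypothetical placeholders.
 */
#include "drmP.h"

static int example_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	/*
	 * A real handler would bind the object (e.g. into the GTT), look up
	 * the page backing vmf->virtual_address and insert its PFN.
	 */
	(void)obj;
	return VM_FAULT_SIGBUS;		/* placeholder only */
}

static struct vm_operations_struct example_gem_vm_ops = {
	.fault = example_gem_fault,
};

static struct drm_driver example_driver = {
	.driver_features = DRIVER_GEM,
	.gem_vm_ops = &example_gem_vm_ops,	/* used by drm_gem_mmap() */
	.fops = {
		.owner = THIS_MODULE,
		.open = drm_open,
		.release = drm_release,
		.mmap = drm_gem_mmap,	/* GEM offsets; legacy maps fall through */
	},
};

With this in place, any offset that drm_gem_mmap() finds in the GEM offset hash is handed to the driver's fault handler on first access, while offsets it does not recognize still fall through to the legacy drm_mmap() path.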
-rw-r--r--  drivers/gpu/drm/drm_bufs.c    |   6
-rw-r--r--  drivers/gpu/drm/drm_drv.c     |   6
-rw-r--r--  drivers/gpu/drm/drm_fops.c    |  12
-rw-r--r--  drivers/gpu/drm/drm_gem.c     | 109
-rw-r--r--  drivers/gpu/drm/drm_hashtab.c |   2
-rw-r--r--  drivers/gpu/drm/drm_vm.c      |   7
-rw-r--r--  include/drm/drm.h             |   1
-rw-r--r--  include/drm/drmP.h            |  21
8 files changed, 161 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index dc3ce3e0a0a4..7fb690bcd492 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -262,6 +262,9 @@ static int drm_addmap_core(struct drm_device * dev, unsigned int offset,
 		DRM_DEBUG("AGP offset = 0x%08lx, size = 0x%08lx\n",
 			  map->offset, map->size);
 		break;
+	case _DRM_GEM:
+		DRM_ERROR("tried to rmmap GEM object\n");
+		break;
 	}
 	case _DRM_SCATTER_GATHER:
 		if (!dev->sg) {
@@ -429,6 +432,9 @@ int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
 		dmah.size = map->size;
 		__drm_pci_free(dev, &dmah);
 		break;
+	case _DRM_GEM:
+		DRM_ERROR("tried to rmmap GEM object\n");
+		break;
 	}
 
 	drm_free(map, sizeof(*map), DRM_MEM_MAPS);
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 9f04ca37df6d..98a781375f60 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -209,6 +209,7 @@ int drm_lastclose(struct drm_device * dev)
 	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
 		drm_dma_takedown(dev);
 
+	dev->dev_mapping = NULL;
 	mutex_unlock(&dev->struct_mutex);
 
 	DRM_DEBUG("lastclose completed\n");
@@ -273,6 +274,8 @@ EXPORT_SYMBOL(drm_init);
  */
 static void drm_cleanup(struct drm_device * dev)
 {
+	struct drm_driver *driver = dev->driver;
+
 	DRM_DEBUG("\n");
 
 	if (!dev) {
@@ -304,6 +307,9 @@ static void drm_cleanup(struct drm_device * dev)
 	drm_ht_remove(&dev->map_hash);
 	drm_ctxbitmap_cleanup(dev);
 
+	if (driver->driver_features & DRIVER_GEM)
+		drm_gem_destroy(dev);
+
 	drm_put_minor(&dev->primary);
 	if (drm_put_dev(dev))
 		DRM_ERROR("Cannot unload module\n");
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index f2285237df49..3a6c439652a5 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -133,11 +133,21 @@ int drm_open(struct inode *inode, struct file *filp)
 		spin_lock(&dev->count_lock);
 		if (!dev->open_count++) {
 			spin_unlock(&dev->count_lock);
-			return drm_setup(dev);
+			retcode = drm_setup(dev);
+			goto out;
 		}
 		spin_unlock(&dev->count_lock);
 	}
 
+out:
+	mutex_lock(&dev->struct_mutex);
+	if (dev->dev_mapping == NULL)
+		dev->dev_mapping = inode->i_mapping;
+	else if (dev->dev_mapping != inode->i_mapping)
+		WARN(1, "dev->dev_mapping not inode mapping (%p expected %p)\n",
+		     dev->dev_mapping, inode->i_mapping);
+	mutex_unlock(&dev->struct_mutex);
+
 	return retcode;
 }
 EXPORT_SYMBOL(drm_open);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index ccd1afdede02..b3939de6affd 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -64,6 +64,13 @@
  * up at a later date, and as our interface with shmfs for memory allocation.
  */
 
+/*
+ * We make up offsets for buffer objects so we can recognize them at
+ * mmap time.
+ */
+#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
+#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
+
 /**
  * Initialize the GEM device fields
  */
@@ -71,6 +78,8 @@
 int
 drm_gem_init(struct drm_device *dev)
 {
+	struct drm_gem_mm *mm;
+
 	spin_lock_init(&dev->object_name_lock);
 	idr_init(&dev->object_name_idr);
 	atomic_set(&dev->object_count, 0);
@@ -79,9 +88,41 @@ drm_gem_init(struct drm_device *dev)
 	atomic_set(&dev->pin_memory, 0);
 	atomic_set(&dev->gtt_count, 0);
 	atomic_set(&dev->gtt_memory, 0);
+
+	mm = drm_calloc(1, sizeof(struct drm_gem_mm), DRM_MEM_MM);
+	if (!mm) {
+		DRM_ERROR("out of memory\n");
+		return -ENOMEM;
+	}
+
+	dev->mm_private = mm;
+
+	if (drm_ht_create(&mm->offset_hash, 19)) {
+		drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM);
+		return -ENOMEM;
+	}
+
+	if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
+			DRM_FILE_PAGE_OFFSET_SIZE)) {
+		drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM);
+		drm_ht_remove(&mm->offset_hash);
+		return -ENOMEM;
+	}
+
 	return 0;
 }
 
+void
+drm_gem_destroy(struct drm_device *dev)
+{
+	struct drm_gem_mm *mm = dev->mm_private;
+
+	drm_mm_takedown(&mm->offset_manager);
+	drm_ht_remove(&mm->offset_hash);
+	drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM);
+	dev->mm_private = NULL;
+}
+
 /**
  * Allocate a GEM object of the specified size with shmfs backing store
  */
@@ -419,3 +460,71 @@ drm_gem_object_handle_free(struct kref *kref)
 }
 EXPORT_SYMBOL(drm_gem_object_handle_free);
 
+/**
+ * drm_gem_mmap - memory map routine for GEM objects
+ * @filp: DRM file pointer
+ * @vma: VMA for the area to be mapped
+ *
+ * If a driver supports GEM object mapping, mmap calls on the DRM file
+ * descriptor will end up here.
+ *
+ * If we find the object based on the offset passed in (vma->vm_pgoff will
+ * contain the fake offset we created when the GTT map ioctl was called on
+ * the object), we set up the driver fault handler so that any accesses
+ * to the object can be trapped, to perform migration, GTT binding, surface
+ * register allocation, or performance monitoring.
+ */
+int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->minor->dev;
+	struct drm_gem_mm *mm = dev->mm_private;
+	struct drm_map *map = NULL;
+	struct drm_gem_object *obj;
+	struct drm_hash_item *hash;
+	unsigned long prot;
+	int ret = 0;
+
+	mutex_lock(&dev->struct_mutex);
+
+	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
+		mutex_unlock(&dev->struct_mutex);
+		return drm_mmap(filp, vma);
+	}
+
+	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
+	if (!map ||
+	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
+		ret = -EPERM;
+		goto out_unlock;
+	}
+
+	/* Check for valid size. */
+	if (map->size < vma->vm_end - vma->vm_start) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	obj = map->handle;
+	if (!obj->dev->driver->gem_vm_ops) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
+	vma->vm_ops = obj->dev->driver->gem_vm_ops;
+	vma->vm_private_data = map->handle;
+	/* FIXME: use pgprot_writecombine when available */
+	prot = pgprot_val(vma->vm_page_prot);
+	prot |= _PAGE_CACHE_WC;
+	vma->vm_page_prot = __pgprot(prot);
+
+	vma->vm_file = filp;	/* Needed for drm_vm_open() */
+	drm_vm_open_locked(vma);
+
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_gem_mmap);
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index 33160673a7b7..af539f7d87dd 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -127,6 +127,7 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
 	}
 	return 0;
 }
+EXPORT_SYMBOL(drm_ht_insert_item);
 
 /*
  * Just insert an item and return any "bits" bit key that hasn't been
@@ -188,6 +189,7 @@ int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
 	ht->fill--;
 	return 0;
 }
+EXPORT_SYMBOL(drm_ht_remove_item);
 
 void drm_ht_remove(struct drm_open_hash *ht)
 {
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index c234c6f24a8d..3ffae021d280 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -267,6 +267,9 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
 			dmah.size = map->size;
 			__drm_pci_free(dev, &dmah);
 			break;
+		case _DRM_GEM:
+			DRM_ERROR("tried to rmmap GEM object\n");
+			break;
 		}
 		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
 	}
@@ -399,7 +402,7 @@ static struct vm_operations_struct drm_vm_sg_ops = {
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
-static void drm_vm_open_locked(struct vm_area_struct *vma)
+void drm_vm_open_locked(struct vm_area_struct *vma)
 {
 	struct drm_file *priv = vma->vm_file->private_data;
 	struct drm_device *dev = priv->minor->dev;
@@ -540,7 +543,7 @@ EXPORT_SYMBOL(drm_core_get_reg_ofs);
 * according to the mapping type and remaps the pages. Finally sets the file
 * pointer and calls vm_open().
 */
-static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
+int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
 {
 	struct drm_file *priv = filp->private_data;
 	struct drm_device *dev = priv->minor->dev;
diff --git a/include/drm/drm.h b/include/drm/drm.h
index 3fb173c5af3e..3a66252456ba 100644
--- a/include/drm/drm.h
+++ b/include/drm/drm.h
@@ -173,6 +173,7 @@ enum drm_map_type {
 	_DRM_AGP = 3,		  /**< AGP/GART */
 	_DRM_SCATTER_GATHER = 4,  /**< Scatter/gather memory for PCI DMA */
 	_DRM_CONSISTENT = 5,	  /**< Consistent memory for PCI DMA */
+	_DRM_GEM = 6,		  /**< GEM object */
 };
 
 /**
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index c9cc618dbcfc..ae42a6a5c24e 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -528,6 +528,7 @@ struct drm_map_list {
 	struct drm_map *map;			/**< mapping */
 	uint64_t user_token;
 	struct drm_master *master;
+	struct drm_mm_node *file_offset_node;	/**< fake offset */
 };
 
 typedef struct drm_map drm_local_map_t;
@@ -568,6 +569,14 @@ struct drm_ati_pcigart_info {
 };
 
 /**
+ * GEM specific mm private for tracking GEM objects
+ */
+struct drm_gem_mm {
+	struct drm_mm offset_manager;	/**< Offset mgmt for buffer objects */
+	struct drm_open_hash offset_hash; /**< User token hash table for maps */
+};
+
+/**
 * This structure defines the drm_mm memory object, which will be used by the
 * DRM for its buffer objects.
 */
@@ -584,6 +593,9 @@ struct drm_gem_object {
 	/** File representing the shmem storage */
 	struct file *filp;
 
+	/* Mapping info for this object */
+	struct drm_map_list map_list;
+
 	/**
 	 * Size of the object, in bytes.  Immutable over the object's
 	 * lifetime.
@@ -758,6 +770,9 @@ struct drm_driver {
 	int (*gem_init_object) (struct drm_gem_object *obj);
 	void (*gem_free_object) (struct drm_gem_object *obj);
 
+	/* Driver private ops for this object */
+	struct vm_operations_struct *gem_vm_ops;
+
 	int major;
 	int minor;
 	int patchlevel;
@@ -910,6 +925,8 @@ struct drm_device {
 	struct drm_sg_mem *sg;	/**< Scatter gather memory */
 	int num_crtcs;		/**< Number of CRTCs on this device */
 	void *dev_private;	/**< device private data */
+	void *mm_private;
+	struct address_space *dev_mapping;
 	struct drm_sigdata sigdata;	   /**< For block_all_signals */
 	sigset_t sigmask;
 
@@ -1026,6 +1043,8 @@ extern int drm_release(struct inode *inode, struct file *filp);
 
 				/* Mapping support (drm_vm.h) */
 extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
+extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma);
+extern void drm_vm_open_locked(struct vm_area_struct *vma);
 extern unsigned long drm_core_get_map_ofs(struct drm_map * map);
 extern unsigned long drm_core_get_reg_ofs(struct drm_device *dev);
 extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
@@ -1287,10 +1306,12 @@ extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size);
 
 /* Graphics Execution Manager library functions (drm_gem.c) */
 int drm_gem_init(struct drm_device *dev);
+void drm_gem_destroy(struct drm_device *dev);
 void drm_gem_object_free(struct kref *kref);
 struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
 					    size_t size);
 void drm_gem_object_handle_free(struct kref *kref);
+int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 
 static inline void
 drm_gem_object_reference(struct drm_gem_object *obj)
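For the lookup in drm_gem_mmap() to succeed, a driver has to publish each mappable object at a fake offset in the drm_gem_mm offset space: reserve a range from offset_manager, record it in the object's map_list as a _DRM_GEM map, and hash it into offset_hash keyed by page offset. The sketch below shows roughly how that can be done with the facilities this patch adds; it is not part of the commit, the helper name is hypothetical, and error handling for the drm_mm allocation calls is omitted.

/*
 * Illustrative sketch, not part of this patch: publishing a GEM object at a
 * fake mmap offset so drm_gem_mmap() can find it.  The helper name is
 * hypothetical and error handling for the drm_mm calls is omitted.
 */
static int example_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list = &obj->map_list;
	struct drm_map *map;

	/* A _DRM_GEM map whose handle points back at the object. */
	list->map = drm_calloc(1, sizeof(struct drm_map), DRM_MEM_DRIVER);
	if (!list->map)
		return -ENOMEM;
	map = list->map;
	map->type = _DRM_GEM;
	map->size = obj->size;
	map->handle = obj;

	/* Reserve a page-granular range in the fake offset space. */
	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
						    obj->size / PAGE_SIZE, 0, 0);
	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
						  obj->size / PAGE_SIZE, 0);

	/* Hash it by page offset so drm_gem_mmap() can match vma->vm_pgoff. */
	list->hash.key = list->file_offset_node->start;
	return drm_ht_insert_item(&mm->offset_hash, &list->hash);
}

Userspace would then obtain the fake offset (list->hash.key << PAGE_SHIFT) through a driver-specific ioctl and pass it as the offset argument to mmap() on the DRM file descriptor, which routes the mapping through drm_gem_mmap() above; offsets not found in the hash still fall through to the legacy drm_mmap() path.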