author | H. Peter Anvin <hpa@linux.intel.com> | 2012-01-19 21:56:50 +0100
committer | H. Peter Anvin <hpa@linux.intel.com> | 2012-01-19 21:56:50 +0100
commit | 282f445a779ed76fca9884fe377bf56a3088b208 (patch)
tree | d9abcf526baee0100672851e0a8894c19e762a39 /drivers/gpu/drm/radeon/radeon.h
parent | x86, tsc: Fix SMI induced variation in quick_pit_calibrate() (diff)
parent | uml: fix compile for x86-64 (diff)
download | linux-282f445a779ed76fca9884fe377bf56a3088b208.tar.xz, linux-282f445a779ed76fca9884fe377bf56a3088b208.zip
Merge remote-tracking branch 'linus/master' into x86/urgent
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon.h')
-rw-r--r-- | drivers/gpu/drm/radeon/radeon.h | 367
1 file changed, 304 insertions(+), 63 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 8227e76b5c70..73e05cb85eca 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -107,6 +107,21 @@ extern int radeon_msi;
 #define RADEONFB_CONN_LIMIT 4
 #define RADEON_BIOS_NUM_SCRATCH 8
 
+/* max number of rings */
+#define RADEON_NUM_RINGS 3
+
+/* internal ring indices */
+/* r1xx+ has gfx CP ring */
+#define RADEON_RING_TYPE_GFX_INDEX 0
+
+/* cayman has 2 compute CP rings */
+#define CAYMAN_RING_TYPE_CP1_INDEX 1
+#define CAYMAN_RING_TYPE_CP2_INDEX 2
+
+/* hardcode those limit for now */
+#define RADEON_VA_RESERVED_SIZE (8 << 20)
+#define RADEON_IB_VM_MAX_SIZE (64 << 10)
+
 /*
  * Errata workarounds.
  */
@@ -192,14 +207,15 @@ extern int sumo_get_temp(struct radeon_device *rdev);
  */
 struct radeon_fence_driver {
 	uint32_t scratch_reg;
+	uint64_t gpu_addr;
+	volatile uint32_t *cpu_addr;
 	atomic_t seq;
 	uint32_t last_seq;
 	unsigned long last_jiffies;
 	unsigned long last_timeout;
 	wait_queue_head_t queue;
-	rwlock_t lock;
 	struct list_head created;
-	struct list_head emited;
+	struct list_head emitted;
 	struct list_head signaled;
 	bool initialized;
 };
@@ -210,21 +226,26 @@ struct radeon_fence {
 	struct list_head list;
 	/* protected by radeon_fence.lock */
 	uint32_t seq;
-	bool emited;
+	bool emitted;
 	bool signaled;
+	/* RB, DMA, etc. */
+	int ring;
+	struct radeon_semaphore *semaphore;
 };
 
+int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
 int radeon_fence_driver_init(struct radeon_device *rdev);
 void radeon_fence_driver_fini(struct radeon_device *rdev);
-int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence);
+int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
 int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence);
-void radeon_fence_process(struct radeon_device *rdev);
+void radeon_fence_process(struct radeon_device *rdev, int ring);
 bool radeon_fence_signaled(struct radeon_fence *fence);
 int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
-int radeon_fence_wait_next(struct radeon_device *rdev);
-int radeon_fence_wait_last(struct radeon_device *rdev);
+int radeon_fence_wait_next(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_last(struct radeon_device *rdev, int ring);
 struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
 void radeon_fence_unref(struct radeon_fence **fence);
+int radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
 
 /*
  * Tiling registers
@@ -246,6 +267,21 @@ struct radeon_mman {
 	bool initialized;
 };
 
+/* bo virtual address in a specific vm */
+struct radeon_bo_va {
+	/* bo list is protected by bo being reserved */
+	struct list_head bo_list;
+	/* vm list is protected by vm mutex */
+	struct list_head vm_list;
+	/* constant after initialization */
+	struct radeon_vm *vm;
+	struct radeon_bo *bo;
+	uint64_t soffset;
+	uint64_t eoffset;
+	uint32_t flags;
+	bool valid;
+};
+
 struct radeon_bo {
 	/* Protected by gem.mutex */
 	struct list_head list;
@@ -259,6 +295,10 @@ struct radeon_bo {
 	u32 tiling_flags;
 	u32 pitch;
 	int surface_reg;
+	/* list of all virtual address to which this bo
+	 * is associated to
+	 */
+	struct list_head va;
 	/* Constant after initialization */
 	struct radeon_device *rdev;
 	struct drm_gem_object gem_base;
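Editor's note: to make the reworked per-ring fence API above concrete, here is a minimal usage sketch. It is not part of the commit; the calls and the ring index come from the declarations in the hunks above, while the wrapper function and its error handling are illustrative only.

```c
/* Hypothetical helper: fence work on the GFX ring and wait for it.
 * radeon_fence_create() now takes the ring index the fence belongs to.
 */
static int example_fence_and_wait(struct radeon_device *rdev)
{
	struct radeon_fence *fence = NULL;
	int r;

	r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
	if (r)
		return r;

	/* ... commands for the ring would be emitted here ... */

	r = radeon_fence_emit(rdev, fence);
	if (!r)
		/* interruptible wait until the GPU passes the fence */
		r = radeon_fence_wait(fence, true);

	radeon_fence_unref(&fence);
	return r;
}
```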
@@ -274,6 +314,48 @@ struct radeon_bo_list {
 	u32 tiling_flags;
 };
 
+/* sub-allocation manager, it has to be protected by another lock.
+ * By conception this is an helper for other part of the driver
+ * like the indirect buffer or semaphore, which both have their
+ * locking.
+ *
+ * Principe is simple, we keep a list of sub allocation in offset
+ * order (first entry has offset == 0, last entry has the highest
+ * offset).
+ *
+ * When allocating new object we first check if there is room at
+ * the end total_size - (last_object_offset + last_object_size) >=
+ * alloc_size. If so we allocate new object there.
+ *
+ * When there is not enough room at the end, we start waiting for
+ * each sub object until we reach object_offset+object_size >=
+ * alloc_size, this object then become the sub object we return.
+ *
+ * Alignment can't be bigger than page size.
+ *
+ * Hole are not considered for allocation to keep things simple.
+ * Assumption is that there won't be hole (all object on same
+ * alignment).
+ */
+struct radeon_sa_manager {
+	struct radeon_bo *bo;
+	struct list_head sa_bo;
+	unsigned size;
+	uint64_t gpu_addr;
+	void *cpu_ptr;
+	uint32_t domain;
+};
+
+struct radeon_sa_bo;
+
+/* sub-allocation buffer */
+struct radeon_sa_bo {
+	struct list_head list;
+	struct radeon_sa_manager *manager;
+	unsigned offset;
+	unsigned size;
+};
+
 /*
  * GEM objects.
  */
@@ -303,6 +385,46 @@ int radeon_mode_dumb_destroy(struct drm_file *file_priv,
 			     uint32_t handle);
 
 /*
+ * Semaphores.
+ */
+struct radeon_ring;
+
+#define RADEON_SEMAPHORE_BO_SIZE 256
+
+struct radeon_semaphore_driver {
+	rwlock_t lock;
+	struct list_head bo;
+};
+
+struct radeon_semaphore_bo;
+
+/* everything here is constant */
+struct radeon_semaphore {
+	struct list_head list;
+	uint64_t gpu_addr;
+	uint32_t *cpu_ptr;
+	struct radeon_semaphore_bo *bo;
+};
+
+struct radeon_semaphore_bo {
+	struct list_head list;
+	struct radeon_ib *ib;
+	struct list_head free;
+	struct radeon_semaphore semaphores[RADEON_SEMAPHORE_BO_SIZE/8];
+	unsigned nused;
+};
+
+void radeon_semaphore_driver_fini(struct radeon_device *rdev);
+int radeon_semaphore_create(struct radeon_device *rdev,
+			    struct radeon_semaphore **semaphore);
+void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
+				  struct radeon_semaphore *semaphore);
+void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
+				struct radeon_semaphore *semaphore);
+void radeon_semaphore_free(struct radeon_device *rdev,
+			   struct radeon_semaphore *semaphore);
+
+/*
  * GART structures, functions & helpers
  */
 struct radeon_mc;
@@ -310,6 +432,7 @@ struct radeon_mc;
 #define RADEON_GPU_PAGE_SIZE 4096
 #define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
 #define RADEON_GPU_PAGE_SHIFT 12
+#define RADEON_GPU_PAGE_ALIGN(a) (((a) + RADEON_GPU_PAGE_MASK) & ~RADEON_GPU_PAGE_MASK)
 
 struct radeon_gart {
 	dma_addr_t table_addr;
@@ -320,7 +443,6 @@ struct radeon_gart {
 	unsigned table_size;
 	struct page **pages;
 	dma_addr_t *pages_addr;
-	bool *ttm_alloced;
 	bool ready;
 };
 
@@ -434,7 +556,7 @@ union radeon_irq_stat_regs {
 
 struct radeon_irq {
 	bool installed;
-	bool sw_int;
+	bool sw_int[RADEON_NUM_RINGS];
 	bool crtc_vblank_int[RADEON_MAX_CRTCS];
 	bool pflip[RADEON_MAX_CRTCS];
 	wait_queue_head_t vblank_queue;
@@ -444,7 +566,7 @@ struct radeon_irq {
 	wait_queue_head_t idle_queue;
 	bool hdmi[RADEON_MAX_HDMI_BLOCKS];
 	spinlock_t sw_lock;
-	int sw_refcount;
+	int sw_refcount[RADEON_NUM_RINGS];
	union radeon_irq_stat_regs stat_regs;
 	spinlock_t pflip_lock[RADEON_MAX_CRTCS];
 	int pflip_refcount[RADEON_MAX_CRTCS];
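Editor's note: the sub-allocator comment above describes its "room at the end" test in prose. The sketch below restates just that check in code; it is illustrative, not commit code, and `last` is hypothetical shorthand for the highest-offset entry on the manager's `sa_bo` list.

```c
/* Does a new allocation of 'alloc_size' bytes fit after the last
 * sub-allocation? Mirrors the comment's test:
 * total_size - (last_object_offset + last_object_size) >= alloc_size.
 * 'last' may be NULL when the list is empty.
 */
static bool example_sa_fits_at_end(struct radeon_sa_manager *sa_manager,
				   struct radeon_sa_bo *last,
				   unsigned alloc_size)
{
	unsigned end = last ? last->offset + last->size : 0;

	return sa_manager->size - end >= alloc_size;
}
```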
@@ -452,22 +574,23 @@ struct radeon_irq {
 
 int radeon_irq_kms_init(struct radeon_device *rdev);
 void radeon_irq_kms_fini(struct radeon_device *rdev);
-void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev);
-void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
+void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring);
+void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring);
 void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
 void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
 
 /*
- * CP & ring.
+ * CP & rings.
  */
+
 struct radeon_ib {
-	struct list_head list;
+	struct radeon_sa_bo sa_bo;
 	unsigned idx;
+	uint32_t length_dw;
 	uint64_t gpu_addr;
-	struct radeon_fence *fence;
 	uint32_t *ptr;
-	uint32_t length_dw;
-	bool free;
+	struct radeon_fence *fence;
+	unsigned vm_id;
 };
 
 /*
@@ -475,20 +598,22 @@ struct radeon_ib {
  * mutex protects scheduled_ibs, ready, alloc_bm
  */
 struct radeon_ib_pool {
-	struct mutex mutex;
-	struct radeon_bo *robj;
-	struct list_head bogus_ib;
-	struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
-	bool ready;
-	unsigned head_id;
+	struct mutex mutex;
+	struct radeon_sa_manager sa_manager;
+	struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
+	bool ready;
+	unsigned head_id;
 };
 
-struct radeon_cp {
+struct radeon_ring {
 	struct radeon_bo *ring_obj;
 	volatile uint32_t *ring;
 	unsigned rptr;
+	unsigned rptr_offs;
+	unsigned rptr_reg;
 	unsigned wptr;
 	unsigned wptr_old;
+	unsigned wptr_reg;
 	unsigned ring_size;
 	unsigned ring_free_dw;
 	int count_dw;
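Editor's note: a hedged sketch of how the semaphore objects introduced earlier in this diff can order two of the rings defined by the new indices. It is not from the commit; real callers emit these packets while holding the ring locks and keep the semaphore alive until both rings have passed it.

```c
/* Make cayman's first compute ring wait until the GFX ring signals. */
static int example_order_cp1_after_gfx(struct radeon_device *rdev)
{
	struct radeon_semaphore *sem;
	int r;

	r = radeon_semaphore_create(rdev, &sem);
	if (r)
		return r;

	/* GFX flags the semaphore when it reaches this point ... */
	radeon_semaphore_emit_signal(rdev, RADEON_RING_TYPE_GFX_INDEX, sem);
	/* ... and CP1 stalls here until that signal lands */
	radeon_semaphore_emit_wait(rdev, CAYMAN_RING_TYPE_CP1_INDEX, sem);

	/* illustrative only: real code defers the free until the
	 * hardware is known to be past both packets */
	radeon_semaphore_free(rdev, sem);
	return 0;
}
```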
@@ -497,6 +622,61 @@ struct radeon_cp {
 	uint32_t ptr_mask;
 	struct mutex mutex;
 	bool ready;
+	u32 ptr_reg_shift;
+	u32 ptr_reg_mask;
+	u32 nop;
+};
+
+/*
+ * VM
+ */
+struct radeon_vm {
+	struct list_head list;
+	struct list_head va;
+	int id;
+	unsigned last_pfn;
+	u64 pt_gpu_addr;
+	u64 *pt;
+	struct radeon_sa_bo sa_bo;
+	struct mutex mutex;
+	/* last fence for cs using this vm */
+	struct radeon_fence *fence;
+};
+
+struct radeon_vm_funcs {
+	int (*init)(struct radeon_device *rdev);
+	void (*fini)(struct radeon_device *rdev);
+	/* cs mutex must be lock for schedule_ib */
+	int (*bind)(struct radeon_device *rdev, struct radeon_vm *vm, int id);
+	void (*unbind)(struct radeon_device *rdev, struct radeon_vm *vm);
+	void (*tlb_flush)(struct radeon_device *rdev, struct radeon_vm *vm);
+	uint32_t (*page_flags)(struct radeon_device *rdev,
+			       struct radeon_vm *vm,
+			       uint32_t flags);
+	void (*set_page)(struct radeon_device *rdev, struct radeon_vm *vm,
+			 unsigned pfn, uint64_t addr, uint32_t flags);
+};
+
+struct radeon_vm_manager {
+	struct list_head lru_vm;
+	uint32_t use_bitmap;
+	struct radeon_sa_manager sa_manager;
+	uint32_t max_pfn;
+	/* fields constant after init */
+	const struct radeon_vm_funcs *funcs;
+	/* number of VMIDs */
+	unsigned nvm;
+	/* vram base address for page table entry */
+	u64 vram_base_offset;
+	/* is vm enabled? */
+	bool enabled;
+};
+
+/*
+ * file private structure
+ */
+struct radeon_fpriv {
+	struct radeon_vm vm;
 };
 
 /*
@@ -506,6 +686,7 @@ struct r600_ih {
 	struct radeon_bo *ring_obj;
 	volatile uint32_t *ring;
 	unsigned rptr;
+	unsigned rptr_offs;
 	unsigned wptr;
 	unsigned wptr_old;
 	unsigned ring_size;
@@ -549,23 +730,29 @@ struct r600_blit {
 
 void r600_blit_suspend(struct radeon_device *rdev);
 
-int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib);
+int radeon_ib_get(struct radeon_device *rdev, int ring,
+		  struct radeon_ib **ib, unsigned size);
 void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib);
+bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib);
 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
 int radeon_ib_pool_init(struct radeon_device *rdev);
 void radeon_ib_pool_fini(struct radeon_device *rdev);
+int radeon_ib_pool_start(struct radeon_device *rdev);
+int radeon_ib_pool_suspend(struct radeon_device *rdev);
 int radeon_ib_test(struct radeon_device *rdev);
-extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib);
 /* Ring access between begin & end cannot sleep */
-void radeon_ring_free_size(struct radeon_device *rdev);
-int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw);
-int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw);
-void radeon_ring_commit(struct radeon_device *rdev);
-void radeon_ring_unlock_commit(struct radeon_device *rdev);
-void radeon_ring_unlock_undo(struct radeon_device *rdev);
-int radeon_ring_test(struct radeon_device *rdev);
-int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size);
-void radeon_ring_fini(struct radeon_device *rdev);
+int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
+int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
+int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
+void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
+int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
+int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
+		     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
		     u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop);
+void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);
 
 
 /*
@@ -582,12 +769,12 @@ struct radeon_cs_reloc {
 struct radeon_cs_chunk {
 	uint32_t chunk_id;
 	uint32_t length_dw;
-	int kpage_idx[2];
-	uint32_t *kpage[2];
+	int kpage_idx[2];
+	uint32_t *kpage[2];
 	uint32_t *kdata;
-	void __user *user_ptr;
-	int last_copied_page;
-	int last_page_index;
+	void __user *user_ptr;
+	int last_copied_page;
+	int last_page_index;
 };
 
 struct radeon_cs_parser {
@@ -605,14 +792,18 @@ struct radeon_cs_parser {
 	struct radeon_cs_reloc *relocs;
 	struct radeon_cs_reloc **relocs_ptr;
 	struct list_head validated;
+	bool sync_to_ring[RADEON_NUM_RINGS];
 	/* indices of various chunks */
 	int chunk_ib_idx;
 	int chunk_relocs_idx;
+	int chunk_flags_idx;
 	struct radeon_ib *ib;
 	void *track;
 	unsigned family;
 	int parser_error;
-	bool keep_tiling_flags;
+	u32 cs_flags;
+	u32 ring;
+	s32 priority;
 };
 
 extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
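Editor's note: the IB entry points above now carry a ring index and a size, since IBs are carved out of the sub-allocator. A minimal round trip might look like the sketch below; it is illustrative, not commit code, and the packet value and size are placeholders.

```c
static int example_submit_small_ib(struct radeon_device *rdev)
{
	struct radeon_ib *ib;
	int r;

	/* carve a small IB for the GFX ring out of the pool */
	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, 64);
	if (r)
		return r;

	ib->ptr[0] = 0x80000000;	/* placeholder dword, e.g. a NOP */
	ib->length_dw = 1;

	/* hand it to the ring; the pool fences and recycles it */
	r = radeon_ib_schedule(rdev, ib);
	radeon_ib_free(rdev, &ib);
	return r;
}
```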
@@ -869,11 +1060,20 @@ void radeon_benchmark(struct radeon_device *rdev, int test_number);
  * Testing
  */
 void radeon_test_moves(struct radeon_device *rdev);
+void radeon_test_ring_sync(struct radeon_device *rdev,
+			   struct radeon_ring *cpA,
+			   struct radeon_ring *cpB);
+void radeon_test_syncing(struct radeon_device *rdev);
 
 /*
  * Debugfs
  */
+struct radeon_debugfs {
+	struct drm_info_list *files;
+	unsigned num_files;
+};
+
 int radeon_debugfs_add_files(struct radeon_device *rdev,
 			     struct drm_info_list *files,
 			     unsigned nfiles);
@@ -889,21 +1089,27 @@ struct radeon_asic {
 	int (*resume)(struct radeon_device *rdev);
 	int (*suspend)(struct radeon_device *rdev);
 	void (*vga_set_state)(struct radeon_device *rdev, bool state);
-	bool (*gpu_is_lockup)(struct radeon_device *rdev);
+	bool (*gpu_is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
 	int (*asic_reset)(struct radeon_device *rdev);
 	void (*gart_tlb_flush)(struct radeon_device *rdev);
 	int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr);
 	int (*cp_init)(struct radeon_device *rdev, unsigned ring_size);
 	void (*cp_fini)(struct radeon_device *rdev);
 	void (*cp_disable)(struct radeon_device *rdev);
-	void (*cp_commit)(struct radeon_device *rdev);
 	void (*ring_start)(struct radeon_device *rdev);
-	int (*ring_test)(struct radeon_device *rdev);
-	void (*ring_ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
+
+	struct {
+		void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
+		int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib);
+		void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
+		void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
+				       struct radeon_semaphore *semaphore, bool emit_wait);
+	} ring[RADEON_NUM_RINGS];
+
+	int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
 	int (*irq_set)(struct radeon_device *rdev);
 	int (*irq_process)(struct radeon_device *rdev);
 	u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
-	void (*fence_ring_emit)(struct radeon_device *rdev, struct radeon_fence *fence);
 	int (*cs_parse)(struct radeon_cs_parser *p);
 	int (*copy_blit)(struct radeon_device *rdev,
 			 uint64_t src_offset,
@@ -1132,6 +1338,8 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
 			  struct drm_file *filp);
 int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 			       struct drm_file *filp);
+int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *filp);
 int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
 int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *filp);
@@ -1231,11 +1439,10 @@ struct radeon_device {
 	struct radeon_mode_info mode_info;
 	struct radeon_scratch scratch;
 	struct radeon_mman mman;
-	struct radeon_fence_driver fence_drv;
-	struct radeon_cp cp;
-	/* cayman compute rings */
-	struct radeon_cp cp1;
-	struct radeon_cp cp2;
+	rwlock_t fence_lock;
+	struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
+	struct radeon_semaphore_driver semaphore_drv;
+	struct radeon_ring ring[RADEON_NUM_RINGS];
 	struct radeon_ib_pool ib_pool;
 	struct radeon_irq irq;
 	struct radeon_asic *asic;
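Editor's note: with `gpu_is_lockup`, `ring_test`, and the new `ring[]` ops table, per-ring behaviour is picked by index at the call site. The sketch below restates the dispatch that the macros later in this header expand to; it is an illustrative restatement, not commit code.

```c
/* Equivalent of the radeon_fence_ring_emit() macro further down:
 * the fence's ring index selects the matching asic ops entry. */
static void example_emit_fence(struct radeon_device *rdev,
			       struct radeon_fence *fence)
{
	rdev->asic->ring[fence->ring].emit_fence(rdev, fence);
}
```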
@@ -1279,6 +1486,13 @@ struct radeon_device {
 	struct drm_file *cmask_filp;
 	/* i2c buses */
 	struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS];
+	/* debugfs */
+	struct radeon_debugfs debugfs[RADEON_DEBUGFS_MAX_COMPONENTS];
+	unsigned debugfs_count;
+	/* virtual memory */
+	struct radeon_vm_manager vm_manager;
+	/* ring used for bo copies */
+	u32 copy_ring;
 };
 
 int radeon_device_init(struct radeon_device *rdev,
@@ -1414,18 +1628,17 @@ void radeon_atombios_fini(struct radeon_device *rdev);
 /*
  * RING helpers.
  */
-
 #if DRM_DEBUG_CODE == 0
-static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
+static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
 {
-	rdev->cp.ring[rdev->cp.wptr++] = v;
-	rdev->cp.wptr &= rdev->cp.ptr_mask;
-	rdev->cp.count_dw--;
-	rdev->cp.ring_free_dw--;
+	ring->ring[ring->wptr++] = v;
+	ring->wptr &= ring->ptr_mask;
+	ring->count_dw--;
+	ring->ring_free_dw--;
 }
 #else
 /* With debugging this is just too big to inline */
-void radeon_ring_write(struct radeon_device *rdev, uint32_t v);
+void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
 #endif
 
 /*
@@ -1437,18 +1650,19 @@ void radeon_ring_write(struct radeon_device *rdev, uint32_t v);
 #define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
 #define radeon_cs_parse(p) rdev->asic->cs_parse((p))
 #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
-#define radeon_gpu_is_lockup(rdev) (rdev)->asic->gpu_is_lockup((rdev))
+#define radeon_gpu_is_lockup(rdev, cp) (rdev)->asic->gpu_is_lockup((rdev), (cp))
 #define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
 #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev))
 #define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p))
-#define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev))
 #define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev))
-#define radeon_ring_test(rdev) (rdev)->asic->ring_test((rdev))
-#define radeon_ring_ib_execute(rdev, ib) (rdev)->asic->ring_ib_execute((rdev), (ib))
+#define radeon_ring_test(rdev, cp) (rdev)->asic->ring_test((rdev), (cp))
+#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib))
+#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib))
 #define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev))
 #define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev))
 #define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->get_vblank_counter((rdev), (crtc))
-#define radeon_fence_ring_emit(rdev, fence) (rdev)->asic->fence_ring_emit((rdev), (fence))
+#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence))
+#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
 #define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy_blit((rdev), (s), (d), (np), (f))
 #define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy_dma((rdev), (s), (d), (np), (f))
 #define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy((rdev), (s), (d), (np), (f))
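Editor's note: `radeon_ring_write()` now takes the ring itself, so a caller picks a ring from the new `rdev->ring[]` array, reserves space, writes dwords, and commits. A minimal sketch (illustrative; it just emits two NOPs):

```c
static int example_pad_gfx_ring(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;

	/* reserve two dwords; radeon_ring_lock() also takes the mutex */
	r = radeon_ring_lock(rdev, ring, 2);
	if (r)
		return r;

	radeon_ring_write(ring, ring->nop);	/* per-ring nop packet */
	radeon_ring_write(ring, ring->nop);

	radeon_ring_unlock_commit(rdev, ring);	/* bump wptr, drop mutex */
	return 0;
}
```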
@@ -1503,6 +1717,33 @@ extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
 extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
 
 /*
+ * vm
+ */
+int radeon_vm_manager_init(struct radeon_device *rdev);
+void radeon_vm_manager_fini(struct radeon_device *rdev);
+int radeon_vm_manager_start(struct radeon_device *rdev);
+int radeon_vm_manager_suspend(struct radeon_device *rdev);
+int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
+void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
+int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm);
+void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm);
+int radeon_vm_bo_update_pte(struct radeon_device *rdev,
+			    struct radeon_vm *vm,
+			    struct radeon_bo *bo,
+			    struct ttm_mem_reg *mem);
+void radeon_vm_bo_invalidate(struct radeon_device *rdev,
+			     struct radeon_bo *bo);
+int radeon_vm_bo_add(struct radeon_device *rdev,
+		     struct radeon_vm *vm,
+		     struct radeon_bo *bo,
+		     uint64_t offset,
+		     uint32_t flags);
+int radeon_vm_bo_rmv(struct radeon_device *rdev,
+		     struct radeon_vm *vm,
+		     struct radeon_bo *bo);
+
+
+/*
  * R600 vram scratch functions
  */
 int r600_vram_scratch_init(struct radeon_device *rdev);
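Editor's note: a closing sketch of how the new per-file VM API fits together — a `radeon_fpriv` embeds a `radeon_vm`, a BO gets a range in that address space, and the VM is bound to a hardware id. Illustrative only; the flags value is a placeholder and locking is elided.

```c
static int example_map_bo_into_vm(struct radeon_device *rdev,
				  struct radeon_fpriv *fpriv,
				  struct radeon_bo *bo,
				  uint64_t gpu_offset)
{
	int r;

	/* reserve a range starting at gpu_offset in this vm */
	r = radeon_vm_bo_add(rdev, &fpriv->vm, bo, gpu_offset, 0);
	if (r)
		return r;

	/* attach the vm to a hardware VMID (cs mutex held, per the
	 * radeon_vm_funcs comment earlier in the diff) */
	return radeon_vm_bind(rdev, &fpriv->vm);
}
```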