Diffstat (limited to 'include/drm')
-rw-r--r--  include/drm/amd_asic_type.h          |   1
-rw-r--r--  include/drm/drm_atomic.h             |  16
-rw-r--r--  include/drm/drm_cache.h              |   7
-rw-r--r--  include/drm/drm_dp_helper.h          |  17
-rw-r--r--  include/drm/drm_dp_mst_helper.h      |   8
-rw-r--r--  include/drm/drm_fb_cma_helper.h      |   5
-rw-r--r--  include/drm/drm_fourcc.h             |   9
-rw-r--r--  include/drm/drm_gem_cma_helper.h     |   3
-rw-r--r--  include/drm/gpu_scheduler.h          |   2
-rw-r--r--  include/drm/ttm/ttm_bo_api.h         |  17
-rw-r--r--  include/drm/ttm/ttm_bo_driver.h      |  64
-rw-r--r--  include/drm/ttm/ttm_caching.h        |   2
-rw-r--r--  include/drm/ttm/ttm_device.h         |  15
-rw-r--r--  include/drm/ttm/ttm_kmap_iter.h      |  61
-rw-r--r--  include/drm/ttm/ttm_range_manager.h  |  42
-rw-r--r--  include/drm/ttm/ttm_resource.h       | 111
-rw-r--r--  include/drm/ttm/ttm_tt.h             |  29
17 files changed, 306 insertions, 103 deletions
diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h
index 02f703ec4230..0bf0ad869eb9 100644
--- a/include/drm/amd_asic_type.h
+++ b/include/drm/amd_asic_type.h
@@ -60,6 +60,7 @@ enum amd_asic_type {
CHIP_VANGOGH, /* 31 */
CHIP_DIMGREY_CAVEFISH, /* 32 */
CHIP_BEIGE_GOBY, /* 33 */
+ CHIP_YELLOW_CARP, /* 34 */
CHIP_LAST,
};
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index ac5a28eff2c8..1701c2128a5c 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -896,6 +896,22 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
(new_plane_state) = (__state)->planes[__i].new_state, 1))
/**
+ * for_each_new_plane_in_state_reverse - iterate over all planes in an atomic
+ * update in reverse order, tracking only the new plane state
+ * @__state: &struct drm_atomic_state pointer
+ * @plane: &struct drm_plane iteration cursor
+ * @new_plane_state: &struct drm_plane_state iteration cursor for the new state
+ * @__i: int iteration cursor, for macro-internal use
+ */
+#define for_each_new_plane_in_state_reverse(__state, plane, new_plane_state, __i) \
+ for ((__i) = ((__state)->dev->mode_config.num_total_plane - 1); \
+ (__i) >= 0; \
+ (__i)--) \
+ for_each_if ((__state)->planes[__i].ptr && \
+ ((plane) = (__state)->planes[__i].ptr, \
+ (new_plane_state) = (__state)->planes[__i].new_state, 1))
+
+/**
* for_each_old_plane_in_state - iterate over all planes in an atomic update
* @__state: &struct drm_atomic_state pointer
* @plane: &struct drm_plane iteration cursor
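
As a usage illustration (not part of the diff; helpers prefixed my_ are hypothetical), a driver's commit path could walk the new plane states back to front with the macro added above:

static void my_commit_planes(struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	int i;

	/* Program planes from the last array slot down to the first. */
	for_each_new_plane_in_state_reverse(state, plane, new_plane_state, i) {
		if (!new_plane_state->crtc)
			continue;
		my_program_plane(plane, new_plane_state);	/* hypothetical */
	}
}
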
diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
index e9ad4863d915..cc9de1632dd3 100644
--- a/include/drm/drm_cache.h
+++ b/include/drm/drm_cache.h
@@ -35,6 +35,8 @@
#include <linux/scatterlist.h>
+struct dma_buf_map;
+
void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
void drm_clflush_sg(struct sg_table *st);
void drm_clflush_virt_range(void *addr, unsigned long length);
@@ -70,4 +72,9 @@ static inline bool drm_arch_can_wc_memory(void)
#endif
}
+void drm_memcpy_init_early(void);
+
+void drm_memcpy_from_wc(struct dma_buf_map *dst,
+ const struct dma_buf_map *src,
+ unsigned long len);
#endif
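
A minimal sketch of a drm_memcpy_from_wc() caller, assuming the source is a write-combined io mapping and the destination a kernel virtual address (function and buffer names are illustrative):

static void my_copy_vram_to_shadow(void __iomem *vram, void *shadow, size_t size)
{
	struct dma_buf_map src, dst;

	dma_buf_map_set_vaddr_iomem(&src, vram);
	dma_buf_map_set_vaddr(&dst, shadow);

	/* Takes the accelerated WC read path when available and falls
	 * back to a plain copy otherwise; len is in bytes. */
	drm_memcpy_from_wc(&dst, &src, size);
}
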
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 06681bf46d81..3f2715eb965f 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -1377,10 +1377,27 @@ enum drm_dp_phy {
#define DP_SYMBOL_ERROR_COUNT_LANE1_PHY_REPEATER1 0xf0037 /* 1.3 */
#define DP_SYMBOL_ERROR_COUNT_LANE2_PHY_REPEATER1 0xf0039 /* 1.3 */
#define DP_SYMBOL_ERROR_COUNT_LANE3_PHY_REPEATER1 0xf003b /* 1.3 */
+
+#define __DP_FEC1_BASE 0xf0290 /* 1.4 */
+#define __DP_FEC2_BASE 0xf0298 /* 1.4 */
+#define DP_FEC_BASE(dp_phy) \
+ (__DP_FEC1_BASE + ((__DP_FEC2_BASE - __DP_FEC1_BASE) * \
+ ((dp_phy) - DP_PHY_LTTPR1)))
+
+#define DP_FEC_REG(dp_phy, fec1_reg) \
+ (DP_FEC_BASE(dp_phy) - DP_FEC_BASE(DP_PHY_LTTPR1) + fec1_reg)
+
#define DP_FEC_STATUS_PHY_REPEATER1 0xf0290 /* 1.4 */
+#define DP_FEC_STATUS_PHY_REPEATER(dp_phy) \
+ DP_FEC_REG(dp_phy, DP_FEC_STATUS_PHY_REPEATER1)
+
#define DP_FEC_ERROR_COUNT_PHY_REPEATER1 0xf0291 /* 1.4 */
#define DP_FEC_CAPABILITY_PHY_REPEATER1 0xf0294 /* 1.4a */
+#define DP_LTTPR_MAX_ADD 0xf02ff /* 1.4 */
+
+#define DP_DPCD_MAX_ADD 0xfffff /* 1.4 */
+
/* Repeater modes */
#define DP_PHY_REPEATER_MODE_TRANSPARENT 0x55 /* 1.3 */
#define DP_PHY_REPEATER_MODE_NON_TRANSPARENT 0xaa /* 1.3 */
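
Worked example for the new LTTPR FEC indexing (not part of the diff): DP_PHY_LTTPR2 - DP_PHY_LTTPR1 == 1, so DP_FEC_BASE(DP_PHY_LTTPR2) == 0xf0298 and DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR2) == 0xf0298. A hypothetical caller reading that status register:

static int my_read_lttpr2_fec_status(struct drm_dp_aux *aux, u8 *status)
{
	/* Resolves to DPCD address 0xf0298 for the second LTTPR. */
	return drm_dp_dpcd_readb(aux, DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR2),
				 status);
}
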
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index c87a829b6498..ddb9231d0309 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -596,11 +596,11 @@ struct drm_dp_mst_topology_mgr {
/**
* @max_lane_count: maximum number of lanes the GPU can drive.
*/
- u8 max_lane_count;
+ int max_lane_count;
/**
- * @max_link_rate: maximum link rate per lane GPU can output.
+ * @max_link_rate: maximum link rate per lane GPU can output, in kHz.
*/
- u8 max_link_rate;
+ int max_link_rate;
/**
* @conn_base_id: DRM connector ID this mgr is connected to. Only used
* to build the MST connector path value.
@@ -774,7 +774,7 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
struct drm_device *dev, struct drm_dp_aux *aux,
int max_dpcd_transaction_bytes,
int max_payloads,
- u8 max_lane_count, u8 max_link_rate,
+ int max_lane_count, int max_link_rate,
int conn_base_id);
void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr);
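
With the wider int types, callers pass the real lane count and the per-lane link rate in kHz rather than raw DPCD codes. A hedged sketch (mgr, aux and conn_base_id come from the surrounding driver):

	/* 4 lanes at HBR3, i.e. 810000 kHz per lane. */
	ret = drm_dp_mst_topology_mgr_init(mgr, dev, aux,
					   16 /* max DPCD transaction bytes */,
					   4  /* max payloads */,
					   4  /* max lane count */,
					   drm_dp_bw_code_to_link_rate(DP_LINK_BW_8_1),
					   conn_base_id);
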
diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h
index 795aea1d0a25..6447e34528f8 100644
--- a/include/drm/drm_fb_cma_helper.h
+++ b/include/drm/drm_fb_cma_helper.h
@@ -4,6 +4,7 @@
#include <linux/types.h>
+struct drm_device;
struct drm_framebuffer;
struct drm_plane_state;
@@ -14,5 +15,9 @@ dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb,
struct drm_plane_state *state,
unsigned int plane);
+void drm_fb_cma_sync_non_coherent(struct drm_device *drm,
+ struct drm_plane_state *old_state,
+ struct drm_plane_state *state);
+
#endif
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index 156b122c0ad5..3b138d4ae67e 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -136,14 +136,6 @@ struct drm_format_info {
};
/**
- * struct drm_format_name_buf - name of a DRM format
- * @str: string buffer containing the format name
- */
-struct drm_format_name_buf {
- char str[32];
-};
-
-/**
* drm_format_info_is_yuv_packed - check that the format info matches a YUV
* format with data laid in a single plane
* @info: format info
@@ -318,6 +310,5 @@ unsigned int drm_format_info_block_height(const struct drm_format_info *info,
int plane);
uint64_t drm_format_info_min_pitch(const struct drm_format_info *info,
int plane, unsigned int buffer_width);
-const char *drm_get_format_name(uint32_t format, struct drm_format_name_buf *buf);
#endif /* __DRM_FOURCC_H__ */
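
drm_get_format_name() and struct drm_format_name_buf are gone without replacement; format logging is expected to go through the %p4cc printk specifier instead, e.g. (illustrative, assuming fb is a struct drm_framebuffer pointer):

	drm_dbg_kms(fb->dev, "unsupported pixel format %p4cc\n",
		    &fb->format->format);
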
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
index 0a9711caa3e8..cd13508acbc1 100644
--- a/include/drm/drm_gem_cma_helper.h
+++ b/include/drm/drm_gem_cma_helper.h
@@ -16,6 +16,7 @@ struct drm_mode_create_dumb;
* more than one entry but they are guaranteed to have contiguous
* DMA addresses.
* @vaddr: kernel virtual address of the backing memory
+ * @map_noncoherent: if true, the GEM object is backed by non-coherent memory
*/
struct drm_gem_cma_object {
struct drm_gem_object base;
@@ -24,6 +25,8 @@ struct drm_gem_cma_object {
/* For objects with DMA memory allocated by GEM CMA */
void *vaddr;
+
+ bool map_noncoherent;
};
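
Illustration of the new flag (driver names prefixed my_ are hypothetical): a device without coherent DMA can set map_noncoherent from its gem_create_object hook and then keep CPU caches in sync with drm_fb_cma_sync_non_coherent() in the plane update path:

static struct drm_gem_object *
my_gem_create_object(struct drm_device *drm, size_t size)
{
	struct drm_gem_cma_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	/* Back this object with non-coherent memory. */
	obj->map_noncoherent = true;

	return &obj->base;
}
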
#define to_drm_gem_cma_obj(gem_obj) \
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 10225a0a35d0..d18af49fd009 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -275,7 +275,7 @@ struct drm_sched_backend_ops {
* @pending_list: the list of jobs which are currently in the job queue.
* @job_list_lock: lock to protect the pending_list.
* @hang_limit: once the hangs by a job crosses this limit then it is marked
- * guilty and it will be considered for scheduling further.
+ * guilty and it will no longer be considered for scheduling.
* @score: score to help loadbalancer pick a idle sched
* @_score: score used when the driver doesn't provide one
* @ready: marks if the underlying HW is ready to work
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 639521880c29..f681bbdbc698 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -136,7 +136,7 @@ struct ttm_buffer_object {
* Members protected by the bo::resv::reserved lock.
*/
- struct ttm_resource mem;
+ struct ttm_resource *resource;
struct ttm_tt *ttm;
bool deleted;
@@ -525,19 +525,6 @@ void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct dma_buf_map *map);
int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
/**
- * ttm_bo_mmap - mmap out of the ttm device address space.
- *
- * @filp: filp as input from the mmap method.
- * @vma: vma as input from the mmap method.
- * @bdev: Pointer to the ttm_device with the address space manager.
- *
- * This function is intended to be called by the device mmap method.
- * if the device address space is to be backed by the bo manager.
- */
-int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
- struct ttm_device *bdev);
-
-/**
* ttm_bo_io
*
* @bdev: Pointer to the struct ttm_device.
@@ -620,4 +607,6 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write);
bool ttm_bo_delayed_delete(struct ttm_device *bdev, bool remove_all);
+vm_fault_t ttm_bo_vm_dummy_page(struct vm_fault *vmf, pgprot_t prot);
+
#endif
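
ttm_bo_mmap() is gone; TTM-based drivers are expected to mmap through the GEM object instead, typically ending up in ttm_bo_mmap_obj(). A hedged sketch of the replacement wiring (my_gem_funcs and my_ttm_vm_ops are hypothetical; drm_gem_ttm_mmap() is the GEM helper commonly used for this):

static const struct vm_operations_struct my_ttm_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access,
};

static const struct drm_gem_object_funcs my_gem_funcs = {
	/* ... other callbacks ... */
	.mmap = drm_gem_ttm_mmap,	/* calls ttm_bo_mmap_obj() underneath */
	.vm_ops = &my_ttm_vm_ops,
};
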
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index dbccac957f8f..68d6069572aa 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -40,6 +40,7 @@
#include <drm/ttm/ttm_device.h>
#include "ttm_bo_api.h"
+#include "ttm_kmap_iter.h"
#include "ttm_placement.h"
#include "ttm_tt.h"
#include "ttm_pool.h"
@@ -96,7 +97,7 @@ struct ttm_lru_bulk_move {
*/
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
- struct ttm_resource *mem,
+ struct ttm_resource **mem,
struct ttm_operation_ctx *ctx);
/**
@@ -181,15 +182,15 @@ static inline void
ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
{
spin_lock(&bo->bdev->lru_lock);
- ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
+ ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
spin_unlock(&bo->bdev->lru_lock);
}
static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo,
struct ttm_resource *new_mem)
{
- bo->mem = *new_mem;
- new_mem->mm_node = NULL;
+ WARN_ON(bo->resource);
+ bo->resource = new_mem;
}
/**
@@ -202,9 +203,7 @@ static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo,
static inline void ttm_bo_move_null(struct ttm_buffer_object *bo,
struct ttm_resource *new_mem)
{
- struct ttm_resource *old_mem = &bo->mem;
-
- WARN_ON(old_mem->mm_node != NULL);
+ ttm_resource_free(bo, &bo->resource);
ttm_bo_assign_mem(bo, new_mem);
}
@@ -273,6 +272,23 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
struct ttm_resource *new_mem);
/**
+ * ttm_bo_move_sync_cleanup.
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @new_mem: struct ttm_resource indicating where to move.
+ *
+ * Special case of ttm_bo_move_accel_cleanup where the bo is guaranteed
+ * by the caller to be idle. Typically used after memcpy buffer moves.
+ */
+static inline void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
+ struct ttm_resource *new_mem)
+{
+ int ret = ttm_bo_move_accel_cleanup(bo, NULL, true, false, new_mem);
+
+ WARN_ON(ret);
+}
+
+/**
* ttm_bo_pipeline_gutting.
*
* @bo: A pointer to a struct ttm_buffer_object.
@@ -306,30 +322,14 @@ int ttm_bo_tt_bind(struct ttm_buffer_object *bo, struct ttm_resource *mem);
*/
void ttm_bo_tt_destroy(struct ttm_buffer_object *bo);
-/**
- * ttm_range_man_init
- *
- * @bdev: ttm device
- * @type: memory manager type
- * @use_tt: if the memory manager uses tt
- * @p_size: size of area to be managed in pages.
- *
- * Initialise a generic range manager for the selected memory type.
- * The range manager is installed for this device in the type slot.
- */
-int ttm_range_man_init(struct ttm_device *bdev,
- unsigned type, bool use_tt,
- unsigned long p_size);
-
-/**
- * ttm_range_man_fini
- *
- * @bdev: ttm device
- * @type: memory manager type
- *
- * Remove the generic range manager from a slot and tear it down.
- */
-int ttm_range_man_fini(struct ttm_device *bdev,
- unsigned type);
+void ttm_move_memcpy(struct ttm_buffer_object *bo,
+ u32 num_pages,
+ struct ttm_kmap_iter *dst_iter,
+ struct ttm_kmap_iter *src_iter);
+struct ttm_kmap_iter *
+ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
+ struct io_mapping *iomap,
+ struct sg_table *st,
+ resource_size_t start);
#endif
diff --git a/include/drm/ttm/ttm_caching.h b/include/drm/ttm/ttm_caching.h
index a0b4a49fa432..3c9dd65f5aaf 100644
--- a/include/drm/ttm/ttm_caching.h
+++ b/include/drm/ttm/ttm_caching.h
@@ -33,4 +33,6 @@ enum ttm_caching {
ttm_cached
};
+pgprot_t ttm_prot_from_caching(enum ttm_caching caching, pgprot_t tmp);
+
#endif
diff --git a/include/drm/ttm/ttm_device.h b/include/drm/ttm/ttm_device.h
index 7c8f87bd52d3..cd592f8e941b 100644
--- a/include/drm/ttm/ttm_device.h
+++ b/include/drm/ttm/ttm_device.h
@@ -162,21 +162,6 @@ struct ttm_device_funcs {
struct ttm_place *hop);
/**
- * struct ttm_bo_driver_member verify_access
- *
- * @bo: Pointer to a buffer object.
- * @filp: Pointer to a struct file trying to access the object.
- *
- * Called from the map / write / read methods to verify that the
- * caller is permitted to access the buffer object.
- * This member may be set to NULL, which will refuse this kind of
- * access for all buffer objects.
- * This function should return 0 if access is granted, -EPERM otherwise.
- */
- int (*verify_access)(struct ttm_buffer_object *bo,
- struct file *filp);
-
- /**
* Hook to notify driver about a resource delete.
*/
void (*delete_mem_notify)(struct ttm_buffer_object *bo);
diff --git a/include/drm/ttm/ttm_kmap_iter.h b/include/drm/ttm/ttm_kmap_iter.h
new file mode 100644
index 000000000000..8bb00fd39d6c
--- /dev/null
+++ b/include/drm/ttm/ttm_kmap_iter.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+#ifndef __TTM_KMAP_ITER_H__
+#define __TTM_KMAP_ITER_H__
+
+#include <linux/types.h>
+
+struct ttm_kmap_iter;
+struct dma_buf_map;
+
+/**
+ * struct ttm_kmap_iter_ops - Ops structure for a struct
+ * ttm_kmap_iter.
+ * @maps_tt: Whether the iterator maps TT memory directly, as opposed to
+ * mapping a TT through an aperture. Both these modes have
+ * struct ttm_resource_manager::use_tt set, but the latter typically
+ * returns is_iomem == true from ttm_mem_io_reserve.
+ */
+struct ttm_kmap_iter_ops {
+ /**
+ * map_local() - Map a PAGE_SIZE part of the resource using
+ * kmap_local semantics.
+ * @res_iter: Pointer to the struct ttm_kmap_iter representing
+ * the resource.
+ * @dmap: The struct dma_buf_map holding the virtual address after
+ * the operation.
+ * @i: The location within the resource to map. PAGE_SIZE granularity.
+ */
+ void (*map_local)(struct ttm_kmap_iter *res_iter,
+ struct dma_buf_map *dmap, pgoff_t i);
+ /**
+ * unmap_local() - Unmap a PAGE_SIZE part of the resource previously
+ * mapped using kmap_local.
+ * @res_iter: Pointer to the struct ttm_kmap_iter representing
+ * the resource.
+ * @dmap: The struct dma_buf_map holding the virtual address after
+ * the operation.
+ */
+ void (*unmap_local)(struct ttm_kmap_iter *res_iter,
+ struct dma_buf_map *dmap);
+ bool maps_tt;
+};
+
+/**
+ * struct ttm_kmap_iter - Iterator for kmap_local type operations on a
+ * resource.
+ * @ops: Pointer to the operations struct.
+ *
+ * This struct is intended to be embedded in a resource-specific specialization
+ * implementing operations for the resource.
+ *
+ * Nothing stops us from extending the operations to vmap, vmap_pfn, etc.,
+ * replacing some or all of the ttm_bo_util.c cpu-map functionality.
+ */
+struct ttm_kmap_iter {
+ const struct ttm_kmap_iter_ops *ops;
+};
+
+#endif /* __TTM_KMAP_ITER_H__ */
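
To show how the iterators fit together (a sketch assuming the destination is TT/system memory and the source an io aperture; the my_ names are hypothetical driver helpers):

	struct ttm_kmap_iter_tt tt_iter;
	struct ttm_kmap_iter_iomap io_iter;
	struct ttm_kmap_iter *dst, *src;

	dst = ttm_kmap_iter_tt_init(&tt_iter, bo->ttm);
	src = ttm_kmap_iter_iomap_init(&io_iter, &my_dev->vram_iomap,
				       my_bo_sg_table(bo), my_vram_start);

	/* Copy page by page through short-lived kmap_local-style mappings. */
	ttm_move_memcpy(bo, new_mem->num_pages, dst, src);
	ttm_bo_move_sync_cleanup(bo, new_mem);
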
diff --git a/include/drm/ttm/ttm_range_manager.h b/include/drm/ttm/ttm_range_manager.h
new file mode 100644
index 000000000000..22b6fa42ac20
--- /dev/null
+++ b/include/drm/ttm/ttm_range_manager.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+#ifndef _TTM_RANGE_MANAGER_H_
+#define _TTM_RANGE_MANAGER_H_
+
+#include <drm/ttm/ttm_resource.h>
+#include <drm/drm_mm.h>
+
+/**
+ * struct ttm_range_mgr_node
+ *
+ * @base: base class we extend
+ * @mm_nodes: MM nodes, usually 1
+ *
+ * Extending the ttm_resource object to manage an address space allocation with
+ * one or more drm_mm_nodes.
+ */
+struct ttm_range_mgr_node {
+ struct ttm_resource base;
+ struct drm_mm_node mm_nodes[];
+};
+
+/**
+ * to_ttm_range_mgr_node
+ *
+ * @res: the resource to upcast
+ *
+ * Upcast the ttm_resource object into a ttm_range_mgr_node object.
+ */
+static inline struct ttm_range_mgr_node *
+to_ttm_range_mgr_node(struct ttm_resource *res)
+{
+ return container_of(res, struct ttm_range_mgr_node, base);
+}
+
+int ttm_range_man_init(struct ttm_device *bdev,
+ unsigned type, bool use_tt,
+ unsigned long p_size);
+int ttm_range_man_fini(struct ttm_device *bdev,
+ unsigned type);
+
+#endif
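
A minimal usage sketch for the relocated range-manager API (struct my_device and the TTM_PL_VRAM sizing are illustrative):

static int my_vram_mgr_init(struct my_device *mdev, u64 vram_size)
{
	/* Manage a vram_size-byte aperture with the generic range manager. */
	return ttm_range_man_init(&mdev->bdev, TTM_PL_VRAM, false,
				  vram_size >> PAGE_SHIFT);
}

static u64 my_bo_vram_offset(struct ttm_buffer_object *bo)
{
	/* The range manager allocates in pages, so mm_nodes[0].start is a
	 * page offset into the managed aperture. */
	return (u64)to_ttm_range_mgr_node(bo->resource)->mm_nodes[0].start
	       << PAGE_SHIFT;
}
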
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
index 890b9d369519..140b6b9a8bbe 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -27,9 +27,11 @@
#include <linux/types.h>
#include <linux/mutex.h>
+#include <linux/dma-buf-map.h>
#include <linux/dma-fence.h>
#include <drm/drm_print.h>
#include <drm/ttm/ttm_caching.h>
+#include <drm/ttm/ttm_kmap_iter.h>
#define TTM_MAX_BO_PRIORITY 4U
@@ -38,6 +40,10 @@ struct ttm_resource_manager;
struct ttm_resource;
struct ttm_place;
struct ttm_buffer_object;
+struct dma_buf_map;
+struct io_mapping;
+struct sg_table;
+struct scatterlist;
struct ttm_resource_manager_func {
/**
@@ -45,46 +51,38 @@ struct ttm_resource_manager_func {
*
* @man: Pointer to a memory type manager.
* @bo: Pointer to the buffer object we're allocating space for.
- * @placement: Placement details.
- * @flags: Additional placement flags.
- * @mem: Pointer to a struct ttm_resource to be filled in.
+ * @place: Placement details.
+ * @res: Resulting pointer to the ttm_resource.
*
* This function should allocate space in the memory type managed
- * by @man. Placement details if
- * applicable are given by @placement. If successful,
- * @mem::mm_node should be set to a non-null value, and
- * @mem::start should be set to a value identifying the beginning
+ * by @man. Placement details if applicable are given by @place. If
+ * successful, a filled in ttm_resource object should be returned in
+ * @res. @res::start should be set to a value identifying the beginning
* of the range allocated, and the function should return zero.
- * If the memory region accommodate the buffer object, @mem::mm_node
- * should be set to NULL, and the function should return 0.
+ * If the manager can't fulfill the request -ENOSPC should be returned.
* If a system error occurred, preventing the request to be fulfilled,
* the function should return a negative error code.
*
- * Note that @mem::mm_node will only be dereferenced by
- * struct ttm_resource_manager functions and optionally by the driver,
- * which has knowledge of the underlying type.
- *
- * This function may not be called from within atomic context, so
- * an implementation can and must use either a mutex or a spinlock to
- * protect any data structures managing the space.
+ * This function may not be called from within atomic context and needs
+ * to take care of its own locking to protect any data structures
+ * managing the space.
*/
int (*alloc)(struct ttm_resource_manager *man,
struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource *mem);
+ struct ttm_resource **res);
/**
* struct ttm_resource_manager_func member free
*
* @man: Pointer to a memory type manager.
- * @mem: Pointer to a struct ttm_resource to be filled in.
+ * @res: Pointer to a struct ttm_resource to be freed.
*
- * This function frees memory type resources previously allocated
- * and that are identified by @mem::mm_node and @mem::start. May not
- * be called from within atomic context.
+ * This function frees memory type resources previously allocated.
+ * May not be called from within atomic context.
*/
void (*free)(struct ttm_resource_manager *man,
- struct ttm_resource *mem);
+ struct ttm_resource *res);
/**
* struct ttm_resource_manager_func member debug
@@ -158,9 +156,9 @@ struct ttm_bus_placement {
/**
* struct ttm_resource
*
- * @mm_node: Memory manager node.
- * @size: Requested size of memory region.
- * @num_pages: Actual size of memory region in pages.
+ * @start: Start of the allocation.
+ * @num_pages: Actual size of resource in pages.
+ * @mem_type: Resource type of the allocation.
* @placement: Placement flags.
* @bus: Placement on io bus accessible to the CPU
*
@@ -168,7 +166,6 @@ struct ttm_bus_placement {
* buffer object.
*/
struct ttm_resource {
- void *mm_node;
unsigned long start;
unsigned long num_pages;
uint32_t mem_type;
@@ -177,6 +174,45 @@ struct ttm_resource {
};
/**
+ * struct ttm_kmap_iter_iomap - Specialization for a struct io_mapping +
+ * struct sg_table backed struct ttm_resource.
+ * @base: Embedded struct ttm_kmap_iter providing the usage interface.
+ * @iomap: struct io_mapping representing the underlying linear io_memory.
+ * @st: sg_table into @iomap, representing the memory of the struct ttm_resource.
+ * @start: Offset that needs to be subtracted from @st to make
+ * sg_dma_address(st->sgl) - @start == 0 for @iomap start.
+ * @cache: Scatterlist traversal cache for fast lookups.
+ * @cache.sg: Pointer to the currently cached scatterlist segment.
+ * @cache.i: First index of @sg. PAGE_SIZE granularity.
+ * @cache.end: Last index + 1 of @sg. PAGE_SIZE granularity.
+ * @cache.offs: First offset into @iomap of @sg. PAGE_SIZE granularity.
+ */
+struct ttm_kmap_iter_iomap {
+ struct ttm_kmap_iter base;
+ struct io_mapping *iomap;
+ struct sg_table *st;
+ resource_size_t start;
+ struct {
+ struct scatterlist *sg;
+ pgoff_t i;
+ pgoff_t end;
+ pgoff_t offs;
+ } cache;
+};
+
+/**
+ * struct ttm_kmap_iter_linear_io - Iterator specialization for linear io
+ * @base: The base iterator
+ * @dmap: Points to the starting address of the region
+ * @needs_unmap: Whether we need to unmap on fini
+ */
+struct ttm_kmap_iter_linear_io {
+ struct ttm_kmap_iter base;
+ struct dma_buf_map dmap;
+ bool needs_unmap;
+};
+
+/**
* ttm_resource_manager_set_used
*
* @man: A memory manager object.
@@ -223,10 +259,13 @@ ttm_resource_manager_cleanup(struct ttm_resource_manager *man)
man->move = NULL;
}
+void ttm_resource_init(struct ttm_buffer_object *bo,
+ const struct ttm_place *place,
+ struct ttm_resource *res);
int ttm_resource_alloc(struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource *res);
-void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource *res);
+ struct ttm_resource **res);
+void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res);
void ttm_resource_manager_init(struct ttm_resource_manager *man,
unsigned long p_size);
@@ -237,4 +276,20 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev,
void ttm_resource_manager_debug(struct ttm_resource_manager *man,
struct drm_printer *p);
+struct ttm_kmap_iter *
+ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
+ struct io_mapping *iomap,
+ struct sg_table *st,
+ resource_size_t start);
+
+struct ttm_kmap_iter_linear_io;
+
+struct ttm_kmap_iter *
+ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io,
+ struct ttm_device *bdev,
+ struct ttm_resource *mem);
+
+void ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io,
+ struct ttm_device *bdev,
+ struct ttm_resource *mem);
#endif
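
With alloc()/free() now handing back a ttm_resource pointer, a minimal driver-side manager allocates its own subclass, initializes the embedded base with ttm_resource_init() and returns it through @res. Sketch only; struct my_mgr_resource and the omitted range bookkeeping are hypothetical:

struct my_mgr_resource {
	struct ttm_resource base;
	/* driver-private bookkeeping goes here */
};

static int my_mgr_alloc(struct ttm_resource_manager *man,
			struct ttm_buffer_object *bo,
			const struct ttm_place *place,
			struct ttm_resource **res)
{
	struct my_mgr_resource *node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!node)
		return -ENOMEM;

	ttm_resource_init(bo, place, &node->base);
	/* A real manager would carve out a range here, set node->base.start
	 * accordingly and return -ENOSPC if nothing fits. */

	*res = &node->base;
	return 0;
}

static void my_mgr_free(struct ttm_resource_manager *man,
			struct ttm_resource *res)
{
	kfree(container_of(res, struct my_mgr_resource, base));
}

static const struct ttm_resource_manager_func my_mgr_func = {
	.alloc = my_mgr_alloc,
	.free = my_mgr_free,
};
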
diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h
index 134d09ef7766..818680c6a8ed 100644
--- a/include/drm/ttm/ttm_tt.h
+++ b/include/drm/ttm/ttm_tt.h
@@ -29,6 +29,7 @@
#include <linux/types.h>
#include <drm/ttm/ttm_caching.h>
+#include <drm/ttm/ttm_kmap_iter.h>
struct ttm_bo_device;
struct ttm_tt;
@@ -69,6 +70,18 @@ struct ttm_tt {
enum ttm_caching caching;
};
+/**
+ * struct ttm_kmap_iter_tt - Specialization of a mapping iterator for a tt.
+ * @base: Embedded struct ttm_kmap_iter providing the usage interface
+ * @tt: Cached struct ttm_tt.
+ * @prot: Cached page protection for mapping.
+ */
+struct ttm_kmap_iter_tt {
+ struct ttm_kmap_iter base;
+ struct ttm_tt *tt;
+ pgprot_t prot;
+};
+
static inline bool ttm_tt_is_populated(struct ttm_tt *tt)
{
return tt->page_flags & TTM_PAGE_FLAG_PRIV_POPULATED;
@@ -157,8 +170,24 @@ int ttm_tt_populate(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_oper
*/
void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm);
+/**
+ * ttm_tt_mark_for_clear - Mark pages for clearing on populate.
+ *
+ * @ttm: Pointer to the ttm_tt structure
+ *
+ * Marks pages for clearing so that the next time the page vector is
+ * populated, the pages will be cleared.
+ */
+static inline void ttm_tt_mark_for_clear(struct ttm_tt *ttm)
+{
+ ttm->page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
+}
+
void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages);
+struct ttm_kmap_iter *ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
+ struct ttm_tt *tt);
+
#if IS_ENABLED(CONFIG_AGP)
#include <linux/agp_backend.h>
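
Short illustration of the new clear-on-populate helper (the surrounding driver code, including ctx, is assumed): when a freshly created bo must not hand out stale data, mark its tt before the first population:

	/* Ensure the next ttm_tt_populate() delivers zeroed pages. */
	if (my_bo_needs_clearing(bo))	/* hypothetical policy check */
		ttm_tt_mark_for_clear(bo->ttm);

	ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);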