author     Dave Airlie <airlied@redhat.com>    2009-09-23 08:56:27 +0200
committer  Dave Airlie <airlied@linux.ie>      2009-09-25 05:08:18 +0200
commit     513bcb4655e68706594e45dfa1d4b181500110ba (patch)
tree       ed457db4cfb202015866a131ad4e742503728fad  /drivers/gpu/drm/radeon/radeon.h
parent     drm/radeon/kms: enable dac load detection by default. (diff)
drm/radeon/kms: don't require up to 64k allocations. (v2)
This avoids needing to do a kmalloc larger than PAGE_SIZE for the main
indirect buffer chunk: it adds an accessor for all reads from the chunk
and caches a single page at a time for subsequent reads.

Changes since v1:
- Use a two-page pool, which should cover the most common case in which a
  single packet spanning more than PAGE_SIZE is hit, though I'm having
  trouble seeing anywhere we currently generate anything like that.
- Hopefully proper copying of the short page at the end.
- Added a parser_error flag to record deep errors instead of having to test
  every ib value fetch.
- Fixed a bug in the patch that went to the list.

Signed-off-by: Dave Airlie <airlied@redhat.com>
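To illustrate the intent (this sketch is not part of the commit): instead of
reading from a fully kmalloc'ed copy of the IB chunk, parsers fetch every
dword through the radeon_get_ib_value() accessor added below, which pages the
user buffer in on demand and latches any copy failure into p->parser_error.
In the sketch, example_parse_ib() and process_dwords() are hypothetical names
used only for illustration; radeon_get_ib_value(), radeon_cs_finish_pages()
and the parser_error field are the pieces introduced by this patch.

/* Illustrative sketch only -- not part of this patch. It assumes the
 * declarations added to radeon.h by this commit; process_dwords() is a
 * hypothetical stand-in for the real per-packet register checker.
 */
static void process_dwords(struct radeon_cs_parser *p, unsigned start, unsigned count);

static int example_parse_ib(struct radeon_cs_parser *p)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	unsigned idx = 0;

	while (idx < ib_chunk->length_dw) {
		/* Each fetch may page in another part of the user IB behind
		 * the scenes; a failed copy is latched into p->parser_error
		 * rather than being checked on every read. */
		u32 header = radeon_get_ib_value(p, idx);
		u32 count = (header >> 16) & 0x3FFF;	/* PM4 count = payload dwords - 1 */

		process_dwords(p, idx + 1, count + 1);
		idx += count + 2;

		/* One check per packet is enough because the error sticks. */
		if (p->parser_error)
			return p->parser_error;
	}
	/* Presumably covers the "short page copying at end" mentioned in
	 * the commit message. */
	return radeon_cs_finish_pages(p);
}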
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon.h')
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 37
1 file changed, 36 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index d5de53e06cec..7e34e4376f95 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -342,7 +342,7 @@ struct radeon_ib {
 	unsigned long		idx;
 	uint64_t		gpu_addr;
 	struct radeon_fence	*fence;
-	volatile uint32_t	*ptr;
+	uint32_t		*ptr;
 	uint32_t		length_dw;
 };
@@ -415,7 +415,12 @@ struct radeon_cs_reloc {
 struct radeon_cs_chunk {
 	uint32_t		chunk_id;
 	uint32_t		length_dw;
+	int			kpage_idx[2];
+	uint32_t		*kpage[2];
 	uint32_t		*kdata;
+	void __user		*user_ptr;
+	int			last_copied_page;
+	int			last_page_index;
 };
 
 struct radeon_cs_parser {
@@ -438,8 +443,38 @@ struct radeon_cs_parser {
 	struct radeon_ib	*ib;
 	void			*track;
 	unsigned		family;
+	int			parser_error;
 };
 
+extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
+extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);
+
+
+static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
+{
+	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
+	u32 pg_idx, pg_offset;
+	u32 idx_value = 0;
+	int new_page;
+
+	pg_idx = (idx * 4) / PAGE_SIZE;
+	pg_offset = (idx * 4) % PAGE_SIZE;
+
+	if (ibc->kpage_idx[0] == pg_idx)
+		return ibc->kpage[0][pg_offset/4];
+	if (ibc->kpage_idx[1] == pg_idx)
+		return ibc->kpage[1][pg_offset/4];
+
+	new_page = radeon_cs_update_pages(p, pg_idx);
+	if (new_page < 0) {
+		p->parser_error = new_page;
+		return 0;
+	}
+
+	idx_value = ibc->kpage[new_page][pg_offset/4];
+	return idx_value;
+}
+
 struct radeon_cs_packet {
 	unsigned	idx;
 	unsigned	type;