author:    Jerome Glisse <jglisse@redhat.com>  2013-02-11 14:57:18 +0100
committer: Dave Airlie <airlied@redhat.com>    2013-02-12 07:56:25 +0100
commit:    de0babd60d8d43b58fd06a7803151d32cb589af0
tree:      4791ef696001b44741deb85c092c1cc43654ab6b /drivers
parent:    Merge branch 'drm-nouveau-fixes-3.8' of git://anongit.freedesktop.org/git/nou...
drm/radeon: enforce use of radeon_get_ib_value when reading user cmd
Whenever we parse a command buffer supplied by userspace we need to use
radeon_get_ib_value rather than accessing the ib directly, because the
user command might not yet have been copied into the ib. The parser could
otherwise read values that do not correspond to what the user is actually
sending, possibly allowing a malicious command to go undetected.
Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
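
The distinction the patch enforces is between two views of the same command stream: ib[] is the kernel-side indirect buffer that is still being assembled and patched, while radeon_get_ib_value() reads the chunk data that userspace actually submitted. The sketch below illustrates the idea with simplified, hypothetical names (cmd_parser, get_cmd_value); it is not the radeon implementation, only the access pattern the patch mandates.

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the CS parser state: the dwords userspace
 * submitted and the indirect buffer the kernel is still filling in. */
struct cmd_parser {
	const uint32_t *user_cmd; /* command stream as submitted by userspace */
	uint32_t *ib;             /* kernel ib; copy/patching may still be pending */
};

/* Safe read: always validate against the data userspace actually sent. */
static uint32_t get_cmd_value(const struct cmd_parser *p, unsigned int idx)
{
	return p->user_cmd[idx];
}

int main(void)
{
	uint32_t user_cmd[4] = { 0xdeadbeef, 0x00100000, 0x000000ff, 0x0 };
	uint32_t ib[4] = { 0 }; /* user data not copied into the ib yet */
	struct cmd_parser p = { user_cmd, ib };

	/* Unsafe pattern removed by the patch: reads stale ib contents. */
	printf("ib[1]               = 0x%08x\n", (unsigned int)p.ib[1]);
	/* Pattern the patch enforces: reads what the user really submitted. */
	printf("get_cmd_value(&p,1) = 0x%08x\n", (unsigned int)get_cmd_value(&p, 1));
	return 0;
}
```

Bounds checks that operate on the submitted values cannot be sidestepped by data that only lands in the ib after validation has run.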
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c | 86
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c      | 38
2 files changed, 62 insertions(+), 62 deletions(-)
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 7a445666e71f..ee4cff534f10 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -2909,14 +2909,14 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 			return -EINVAL;
 		}
 		if (tiled) {
-			dst_offset = ib[idx+1];
+			dst_offset = radeon_get_ib_value(p, idx+1);
 			dst_offset <<= 8;
 
 			ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
 			p->idx += count + 7;
 		} else {
-			dst_offset = ib[idx+1];
-			dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32;
+			dst_offset = radeon_get_ib_value(p, idx+1);
+			dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
 
 			ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
 			ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
@@ -2954,12 +2954,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 				DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
 				return -EINVAL;
 			}
-			dst_offset = ib[idx+1];
+			dst_offset = radeon_get_ib_value(p, idx+1);
 			dst_offset <<= 8;
-			dst2_offset = ib[idx+2];
+			dst2_offset = radeon_get_ib_value(p, idx+2);
 			dst2_offset <<= 8;
-			src_offset = ib[idx+8];
-			src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
+			src_offset = radeon_get_ib_value(p, idx+8);
+			src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
 			if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
 				dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
 					 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3014,12 +3014,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 				DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
 				return -EINVAL;
 			}
-			dst_offset = ib[idx+1];
+			dst_offset = radeon_get_ib_value(p, idx+1);
 			dst_offset <<= 8;
-			dst2_offset = ib[idx+2];
+			dst2_offset = radeon_get_ib_value(p, idx+2);
 			dst2_offset <<= 8;
-			src_offset = ib[idx+8];
-			src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
+			src_offset = radeon_get_ib_value(p, idx+8);
+			src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
 			if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
 				dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
 					 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3046,22 +3046,22 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 			/* detile bit */
 			if (idx_value & (1 << 31)) {
 				/* tiled src, linear dst */
-				src_offset = ib[idx+1];
+				src_offset = radeon_get_ib_value(p, idx+1);
 				src_offset <<= 8;
 				ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
 
-				dst_offset = ib[idx+7];
-				dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+				dst_offset = radeon_get_ib_value(p, idx+7);
+				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
 				ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
 				ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
 			} else {
 				/* linear src, tiled dst */
-				src_offset = ib[idx+7];
-				src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+				src_offset = radeon_get_ib_value(p, idx+7);
+				src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
 				ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
 				ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
 
-				dst_offset = ib[idx+1];
+				dst_offset = radeon_get_ib_value(p, idx+1);
 				dst_offset <<= 8;
 				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
 			}
@@ -3098,12 +3098,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 				DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
 				return -EINVAL;
 			}
-			dst_offset = ib[idx+1];
+			dst_offset = radeon_get_ib_value(p, idx+1);
 			dst_offset <<= 8;
-			dst2_offset = ib[idx+2];
+			dst2_offset = radeon_get_ib_value(p, idx+2);
 			dst2_offset <<= 8;
-			src_offset = ib[idx+8];
-			src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
+			src_offset = radeon_get_ib_value(p, idx+8);
+			src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
 			if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
 				dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
 					 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3135,22 +3135,22 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 			/* detile bit */
 			if (idx_value & (1 << 31)) {
 				/* tiled src, linear dst */
-				src_offset = ib[idx+1];
+				src_offset = radeon_get_ib_value(p, idx+1);
 				src_offset <<= 8;
 				ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
 
-				dst_offset = ib[idx+7];
-				dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+				dst_offset = radeon_get_ib_value(p, idx+7);
+				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
 				ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
 				ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
 			} else {
 				/* linear src, tiled dst */
-				src_offset = ib[idx+7];
-				src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
+				src_offset = radeon_get_ib_value(p, idx+7);
+				src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
 				ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
 				ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
 
-				dst_offset = ib[idx+1];
+				dst_offset = radeon_get_ib_value(p, idx+1);
 				dst_offset <<= 8;
 				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
 			}
@@ -3176,10 +3176,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 			switch (misc) {
 			case 0:
 				/* L2L, byte */
-				src_offset = ib[idx+2];
-				src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
-				dst_offset = ib[idx+1];
-				dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+				src_offset = radeon_get_ib_value(p, idx+2);
+				src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+				dst_offset = radeon_get_ib_value(p, idx+1);
+				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
 				if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
 					dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
 						 src_offset + count, radeon_bo_size(src_reloc->robj));
@@ -3216,12 +3216,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 					DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
 					return -EINVAL;
 				}
-				dst_offset = ib[idx+1];
-				dst_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
-				dst2_offset = ib[idx+2];
-				dst2_offset |= ((u64)(ib[idx+5] & 0xff)) << 32;
-				src_offset = ib[idx+3];
-				src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
+				dst_offset = radeon_get_ib_value(p, idx+1);
+				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+				dst2_offset = radeon_get_ib_value(p, idx+2);
+				dst2_offset |= ((u64)(radeon_get_ib_value(p, idx+5) & 0xff)) << 32;
+				src_offset = radeon_get_ib_value(p, idx+3);
+				src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
 				if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
 					dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
 						 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3251,10 +3251,10 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 				}
 			} else {
 				/* L2L, dw */
-				src_offset = ib[idx+2];
-				src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
-				dst_offset = ib[idx+1];
-				dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+				src_offset = radeon_get_ib_value(p, idx+2);
+				src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+				dst_offset = radeon_get_ib_value(p, idx+1);
+				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
 				if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
 					dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
 						 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
@@ -3279,8 +3279,8 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 			DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n");
 			return -EINVAL;
 		}
-		dst_offset = ib[idx+1];
-		dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16;
+		dst_offset = radeon_get_ib_value(p, idx+1);
+		dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
 		if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
 			dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
 				 dst_offset, radeon_bo_size(dst_reloc->robj));
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 69ec24ab8d63..9b2512bf1a46 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -2623,14 +2623,14 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
 			return -EINVAL;
 		}
 		if (tiled) {
-			dst_offset = ib[idx+1];
+			dst_offset = radeon_get_ib_value(p, idx+1);
 			dst_offset <<= 8;
 
 			ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
 			p->idx += count + 5;
 		} else {
-			dst_offset = ib[idx+1];
-			dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32;
+			dst_offset = radeon_get_ib_value(p, idx+1);
+			dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
 
 			ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
 			ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
@@ -2658,32 +2658,32 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
 			/* detile bit */
 			if (idx_value & (1 << 31)) {
 				/* tiled src, linear dst */
-				src_offset = ib[idx+1];
+				src_offset = radeon_get_ib_value(p, idx+1);
 				src_offset <<= 8;
 				ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
 
-				dst_offset = ib[idx+5];
-				dst_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
+				dst_offset = radeon_get_ib_value(p, idx+5);
+				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
 				ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
 				ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
 			} else {
 				/* linear src, tiled dst */
-				src_offset = ib[idx+5];
-				src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
+				src_offset = radeon_get_ib_value(p, idx+5);
+				src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
 				ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
 				ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
 
-				dst_offset = ib[idx+1];
+				dst_offset = radeon_get_ib_value(p, idx+1);
 				dst_offset <<= 8;
 				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
 			}
 			p->idx += 7;
 		} else {
 			if (p->family >= CHIP_RV770) {
-				src_offset = ib[idx+2];
-				src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
-				dst_offset = ib[idx+1];
-				dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+				src_offset = radeon_get_ib_value(p, idx+2);
+				src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+				dst_offset = radeon_get_ib_value(p, idx+1);
+				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
 
 				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
 				ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
@@ -2691,10 +2691,10 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
 				ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
 				p->idx += 5;
 			} else {
-				src_offset = ib[idx+2];
-				src_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
-				dst_offset = ib[idx+1];
-				dst_offset |= ((u64)(ib[idx+3] & 0xff0000)) << 16;
+				src_offset = radeon_get_ib_value(p, idx+2);
+				src_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
+				dst_offset = radeon_get_ib_value(p, idx+1);
+				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff0000)) << 16;
 
 				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
 				ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
@@ -2724,8 +2724,8 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
 			DRM_ERROR("bad DMA_PACKET_WRITE\n");
 			return -EINVAL;
 		}
-		dst_offset = ib[idx+1];
-		dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16;
+		dst_offset = radeon_get_ib_value(p, idx+1);
+		dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
 		if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
 			dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
 				 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
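
Every hunk follows the same shape: the low dword and the upper bits of a GPU address are read from the submitted command stream, combined into a 64-bit offset, and checked against the size of the relocated buffer before the ib is patched. Below is a self-contained sketch of that assembly and bounds check; the helper name and the example values are made up for illustration and are not radeon code.

```c
#include <stdint.h>
#include <stdio.h>

/* Rebuild a DMA address the way the copy hunks above do: low 32 bits in
 * one command dword, upper 8 bits in another. */
static uint64_t assemble_offset(uint32_t lo_dw, uint32_t hi_dw)
{
	uint64_t offset = lo_dw;

	offset |= ((uint64_t)(hi_dw & 0xff)) << 32;
	return offset;
}

int main(void)
{
	uint32_t lo = 0x80001000, hi = 0x02;  /* example command dwords */
	uint64_t src_offset = assemble_offset(lo, hi);
	uint64_t bo_size = 1ULL << 20;        /* pretend size of the relocated BO */
	uint64_t count = 64;                  /* dwords to copy */

	/* Reject the command when it would read past the end of the buffer,
	 * mirroring the dev_warn + -EINVAL paths in the parser. */
	if (src_offset + (count * 4) > bo_size)
		printf("src buffer too small (%llu %llu)\n",
		       (unsigned long long)(src_offset + count * 4),
		       (unsigned long long)bo_size);
	else
		printf("src offset 0x%llx fits\n",
		       (unsigned long long)src_offset);
	return 0;
}
```

Because both dwords now come from the user-submitted stream, the offset that is range-checked is the same one the hardware will eventually use.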