Diffstat (limited to 'drivers/gpu/drm/gud')
-rw-r--r-- | drivers/gpu/drm/gud/gud_connector.c |  10
-rw-r--r-- | drivers/gpu/drm/gud/gud_drv.c       |   1
-rw-r--r-- | drivers/gpu/drm/gud/gud_internal.h  |   1
-rw-r--r-- | drivers/gpu/drm/gud/gud_pipe.c      | 223
4 files changed, 118 insertions, 117 deletions
diff --git a/drivers/gpu/drm/gud/gud_connector.c b/drivers/gpu/drm/gud/gud_connector.c
index fa636206f232..034e78360d4f 100644
--- a/drivers/gpu/drm/gud/gud_connector.c
+++ b/drivers/gpu/drm/gud/gud_connector.c
@@ -303,7 +303,7 @@ static int gud_connector_atomic_check(struct drm_connector *connector,
 	    old_state->tv.margins.right != new_state->tv.margins.right ||
 	    old_state->tv.margins.top != new_state->tv.margins.top ||
 	    old_state->tv.margins.bottom != new_state->tv.margins.bottom ||
-	    old_state->tv.mode != new_state->tv.mode ||
+	    old_state->tv.legacy_mode != new_state->tv.legacy_mode ||
 	    old_state->tv.brightness != new_state->tv.brightness ||
 	    old_state->tv.contrast != new_state->tv.contrast ||
 	    old_state->tv.flicker_reduction != new_state->tv.flicker_reduction ||
@@ -400,7 +400,7 @@ static int gud_connector_add_tv_mode(struct gud_device *gdrm, struct drm_connect
 	for (i = 0; i < num_modes; i++)
 		modes[i] = &buf[i * GUD_CONNECTOR_TV_MODE_NAME_LEN];
 
-	ret = drm_mode_create_tv_properties(connector->dev, num_modes, modes);
+	ret = drm_mode_create_tv_properties_legacy(connector->dev, num_modes, modes);
 free:
 	kfree(buf);
 	if (ret < 0)
@@ -424,7 +424,7 @@ gud_connector_property_lookup(struct drm_connector *connector, u16 prop)
 	case GUD_PROPERTY_TV_BOTTOM_MARGIN:
 		return config->tv_bottom_margin_property;
 	case GUD_PROPERTY_TV_MODE:
-		return config->tv_mode_property;
+		return config->legacy_tv_mode_property;
 	case GUD_PROPERTY_TV_BRIGHTNESS:
 		return config->tv_brightness_property;
 	case GUD_PROPERTY_TV_CONTRAST:
@@ -454,7 +454,7 @@ static unsigned int *gud_connector_tv_state_val(u16 prop, struct drm_tv_connecto
 	case GUD_PROPERTY_TV_BOTTOM_MARGIN:
 		return &state->margins.bottom;
 	case GUD_PROPERTY_TV_MODE:
-		return &state->mode;
+		return &state->legacy_mode;
 	case GUD_PROPERTY_TV_BRIGHTNESS:
 		return &state->brightness;
 	case GUD_PROPERTY_TV_CONTRAST:
@@ -539,7 +539,7 @@ static int gud_connector_add_properties(struct gud_device *gdrm, struct gud_conn
 			fallthrough;
 		case GUD_PROPERTY_TV_HUE:
 			/* This is a no-op if already added. */
-			ret = drm_mode_create_tv_properties(drm, 0, NULL);
+			ret = drm_mode_create_tv_properties_legacy(drm, 0, NULL);
 			if (ret)
 				goto out;
 			break;
diff --git a/drivers/gpu/drm/gud/gud_drv.c b/drivers/gpu/drm/gud/gud_drv.c
index d57dab104358..5aac7cda0505 100644
--- a/drivers/gpu/drm/gud/gud_drv.c
+++ b/drivers/gpu/drm/gud/gud_drv.c
@@ -365,6 +365,7 @@ static void gud_debugfs_init(struct drm_minor *minor)
 static const struct drm_simple_display_pipe_funcs gud_pipe_funcs = {
 	.check = gud_pipe_check,
 	.update = gud_pipe_update,
+	DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS
 };
 
 static const struct drm_mode_config_funcs gud_mode_config_funcs = {
diff --git a/drivers/gpu/drm/gud/gud_internal.h b/drivers/gpu/drm/gud/gud_internal.h
index e351a1f1420d..0d148a6f27aa 100644
--- a/drivers/gpu/drm/gud/gud_internal.h
+++ b/drivers/gpu/drm/gud/gud_internal.h
@@ -43,6 +43,7 @@ struct gud_device {
 	struct drm_framebuffer *fb;
 	struct drm_rect damage;
 	bool prev_flush_failed;
+	void *shadow_buf;
 };
 
 static inline struct gud_device *to_gud_device(struct drm_device *drm)
diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c
index 7c6dc2bcd14a..dc16a92625d4 100644
--- a/drivers/gpu/drm/gud/gud_pipe.c
+++ b/drivers/gpu/drm/gud/gud_pipe.c
@@ -5,6 +5,7 @@
 
 #include <linux/lz4.h>
 #include <linux/usb.h>
+#include <linux/vmalloc.h>
 #include <linux/workqueue.h>
 
 #include <drm/drm_atomic.h>
@@ -15,6 +16,7 @@
 #include <drm/drm_fourcc.h>
 #include <drm/drm_framebuffer.h>
 #include <drm/drm_gem.h>
+#include <drm/drm_gem_atomic_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_print.h>
 #include <drm/drm_rect.h>
@@ -24,17 +26,13 @@
 #include "gud_internal.h"
 
 /*
- * Some userspace rendering loops runs all displays in the same loop.
+ * Some userspace rendering loops run all displays in the same loop.
  * This means that a fast display will have to wait for a slow one.
- * For this reason gud does flushing asynchronous by default.
- * The down side is that in e.g. a single display setup userspace thinks
- * the display is insanely fast since the driver reports back immediately
- * that the flush/pageflip is done. This wastes CPU and power.
- * Such users might want to set this module parameter to false.
+ * Such users might want to enable this module parameter.
  */
-static bool gud_async_flush = true;
+static bool gud_async_flush;
 module_param_named(async_flush, gud_async_flush, bool, 0644);
-MODULE_PARM_DESC(async_flush, "Enable asynchronous flushing [default=true]");
+MODULE_PARM_DESC(async_flush, "Enable asynchronous flushing [default=0]");
 
 /*
  * FIXME: The driver is probably broken on Big Endian machines.
@@ -152,32 +150,21 @@ static size_t gud_xrgb8888_to_color(u8 *dst, const struct drm_format_info *forma
 }
 
 static int gud_prep_flush(struct gud_device *gdrm, struct drm_framebuffer *fb,
+			  const struct iosys_map *src, bool cached_reads,
 			  const struct drm_format_info *format, struct drm_rect *rect,
 			  struct gud_set_buffer_req *req)
 {
-	struct dma_buf_attachment *import_attach = fb->obj[0]->import_attach;
 	u8 compression = gdrm->compression;
-	struct iosys_map map[DRM_FORMAT_MAX_PLANES];
-	struct iosys_map map_data[DRM_FORMAT_MAX_PLANES];
 	struct iosys_map dst;
 	void *vaddr, *buf;
 	size_t pitch, len;
-	int ret = 0;
 
 	pitch = drm_format_info_min_pitch(format, 0, drm_rect_width(rect));
 	len = pitch * drm_rect_height(rect);
 	if (len > gdrm->bulk_len)
 		return -E2BIG;
 
-	ret = drm_gem_fb_vmap(fb, map, map_data);
-	if (ret)
-		return ret;
-
-	vaddr = map_data[0].vaddr;
-
-	ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
-	if (ret)
-		goto vunmap;
+	vaddr = src[0].vaddr;
 retry:
 	if (compression)
 		buf = gdrm->compress_buf;
@@ -192,29 +179,27 @@ retry:
 	if (format != fb->format) {
 		if (format->format == GUD_DRM_FORMAT_R1) {
 			len = gud_xrgb8888_to_r124(buf, format, vaddr, fb, rect);
-			if (!len) {
-				ret = -ENOMEM;
-				goto end_cpu_access;
-			}
+			if (!len)
+				return -ENOMEM;
 		} else if (format->format == DRM_FORMAT_R8) {
-			drm_fb_xrgb8888_to_gray8(&dst, NULL, map_data, fb, rect);
+			drm_fb_xrgb8888_to_gray8(&dst, NULL, src, fb, rect);
 		} else if (format->format == DRM_FORMAT_RGB332) {
-			drm_fb_xrgb8888_to_rgb332(&dst, NULL, map_data, fb, rect);
+			drm_fb_xrgb8888_to_rgb332(&dst, NULL, src, fb, rect);
 		} else if (format->format == DRM_FORMAT_RGB565) {
-			drm_fb_xrgb8888_to_rgb565(&dst, NULL, map_data, fb, rect,
+			drm_fb_xrgb8888_to_rgb565(&dst, NULL, src, fb, rect,
						  gud_is_big_endian());
 		} else if (format->format == DRM_FORMAT_RGB888) {
-			drm_fb_xrgb8888_to_rgb888(&dst, NULL, map_data, fb, rect);
+			drm_fb_xrgb8888_to_rgb888(&dst, NULL, src, fb, rect);
 		} else {
 			len = gud_xrgb8888_to_color(buf, format, vaddr, fb, rect);
 		}
 	} else if (gud_is_big_endian() && format->cpp[0] > 1) {
-		drm_fb_swab(&dst, NULL, map_data, fb, rect, !import_attach);
-	} else if (compression && !import_attach && pitch == fb->pitches[0]) {
+		drm_fb_swab(&dst, NULL, src, fb, rect, cached_reads);
+	} else if (compression && cached_reads && pitch == fb->pitches[0]) {
 		/* can compress directly from the framebuffer */
 		buf = vaddr + rect->y1 * pitch;
 	} else {
-		drm_fb_memcpy(&dst, NULL, map_data, fb, rect);
+		drm_fb_memcpy(&dst, NULL, src, fb, rect);
 	}
 
 	memset(req, 0, sizeof(*req));
@@ -237,12 +222,7 @@ retry:
 		req->compressed_length = cpu_to_le32(complen);
 	}
 
-end_cpu_access:
-	drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
-vunmap:
-	drm_gem_fb_vunmap(fb, map);
-
-	return ret;
+	return 0;
 }
 
 struct gud_usb_bulk_context {
@@ -285,6 +265,7 @@ static int gud_usb_bulk(struct gud_device *gdrm, size_t len)
 }
 
 static int gud_flush_rect(struct gud_device *gdrm, struct drm_framebuffer *fb,
+			  const struct iosys_map *src, bool cached_reads,
 			  const struct drm_format_info *format, struct drm_rect *rect)
 {
 	struct gud_set_buffer_req req;
@@ -293,7 +274,7 @@ static int gud_flush_rect(struct gud_device *gdrm, struct drm_framebuffer *fb,
 
 	drm_dbg(&gdrm->drm, "Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));
 
-	ret = gud_prep_flush(gdrm, fb, format, rect, &req);
+	ret = gud_prep_flush(gdrm, fb, src, cached_reads, format, rect, &req);
 	if (ret)
 		return ret;
 
@@ -333,46 +314,51 @@ void gud_clear_damage(struct gud_device *gdrm)
 	gdrm->damage.y2 = 0;
 }
 
-static void gud_add_damage(struct gud_device *gdrm, struct drm_rect *damage)
+static void gud_flush_damage(struct gud_device *gdrm, struct drm_framebuffer *fb,
+			     const struct iosys_map *src, bool cached_reads,
+			     struct drm_rect *damage)
 {
-	gdrm->damage.x1 = min(gdrm->damage.x1, damage->x1);
-	gdrm->damage.y1 = min(gdrm->damage.y1, damage->y1);
-	gdrm->damage.x2 = max(gdrm->damage.x2, damage->x2);
-	gdrm->damage.y2 = max(gdrm->damage.y2, damage->y2);
-}
+	const struct drm_format_info *format;
+	unsigned int i, lines;
+	size_t pitch;
+	int ret;
 
-static void gud_retry_failed_flush(struct gud_device *gdrm, struct drm_framebuffer *fb,
-				   struct drm_rect *damage)
-{
-	/*
-	 * pipe_update waits for the worker when the display mode is going to change.
-	 * This ensures that the width and height is still the same making it safe to
-	 * add back the damage.
-	 */
+	format = fb->format;
+	if (format->format == DRM_FORMAT_XRGB8888 && gdrm->xrgb8888_emulation_format)
+		format = gdrm->xrgb8888_emulation_format;
 
-	mutex_lock(&gdrm->damage_lock);
-	if (!gdrm->fb) {
-		drm_framebuffer_get(fb);
-		gdrm->fb = fb;
-	}
-	gud_add_damage(gdrm, damage);
-	mutex_unlock(&gdrm->damage_lock);
+	/* Split update if it's too big */
+	pitch = drm_format_info_min_pitch(format, 0, drm_rect_width(damage));
+	lines = drm_rect_height(damage);
+
+	if (gdrm->bulk_len < lines * pitch)
+		lines = gdrm->bulk_len / pitch;
+
+	for (i = 0; i < DIV_ROUND_UP(drm_rect_height(damage), lines); i++) {
+		struct drm_rect rect = *damage;
 
-	/* Retry only once to avoid a possible storm in case of continues errors. */
-	if (!gdrm->prev_flush_failed)
-		queue_work(system_long_wq, &gdrm->work);
-	gdrm->prev_flush_failed = true;
+		rect.y1 += i * lines;
+		rect.y2 = min_t(u32, rect.y1 + lines, damage->y2);
+
+		ret = gud_flush_rect(gdrm, fb, src, cached_reads, format, &rect);
+		if (ret) {
+			if (ret != -ENODEV && ret != -ECONNRESET &&
+			    ret != -ESHUTDOWN && ret != -EPROTO)
+				dev_err_ratelimited(fb->dev->dev,
+						    "Failed to flush framebuffer: error=%d\n", ret);
+			gdrm->prev_flush_failed = true;
+			break;
+		}
+	}
 }
 
 void gud_flush_work(struct work_struct *work)
 {
 	struct gud_device *gdrm = container_of(work, struct gud_device, work);
-	const struct drm_format_info *format;
+	struct iosys_map shadow_map;
 	struct drm_framebuffer *fb;
 	struct drm_rect damage;
-	unsigned int i, lines;
-	int idx, ret = 0;
-	size_t pitch;
+	int idx;
 
 	if (!drm_dev_enter(&gdrm->drm, &idx))
 		return;
@@ -380,6 +366,7 @@ void gud_flush_work(struct work_struct *work)
 	mutex_lock(&gdrm->damage_lock);
 	fb = gdrm->fb;
 	gdrm->fb = NULL;
+	iosys_map_set_vaddr(&shadow_map, gdrm->shadow_buf);
 	damage = gdrm->damage;
 	gud_clear_damage(gdrm);
 	mutex_unlock(&gdrm->damage_lock);
@@ -387,59 +374,43 @@ void gud_flush_work(struct work_struct *work)
 	if (!fb)
 		goto out;
 
-	format = fb->format;
-	if (format->format == DRM_FORMAT_XRGB8888 && gdrm->xrgb8888_emulation_format)
-		format = gdrm->xrgb8888_emulation_format;
-
-	/* Split update if it's too big */
-	pitch = drm_format_info_min_pitch(format, 0, drm_rect_width(&damage));
-	lines = drm_rect_height(&damage);
-
-	if (gdrm->bulk_len < lines * pitch)
-		lines = gdrm->bulk_len / pitch;
-
-	for (i = 0; i < DIV_ROUND_UP(drm_rect_height(&damage), lines); i++) {
-		struct drm_rect rect = damage;
-
-		rect.y1 += i * lines;
-		rect.y2 = min_t(u32, rect.y1 + lines, damage.y2);
-
-		ret = gud_flush_rect(gdrm, fb, format, &rect);
-		if (ret) {
-			if (ret != -ENODEV && ret != -ECONNRESET &&
-			    ret != -ESHUTDOWN && ret != -EPROTO) {
-				bool prev_flush_failed = gdrm->prev_flush_failed;
-
-				gud_retry_failed_flush(gdrm, fb, &damage);
-				if (!prev_flush_failed)
-					dev_err_ratelimited(fb->dev->dev,
-							    "Failed to flush framebuffer: error=%d\n", ret);
-			}
-			break;
-		}
-
-		gdrm->prev_flush_failed = false;
-	}
+	gud_flush_damage(gdrm, fb, &shadow_map, true, &damage);
 
 	drm_framebuffer_put(fb);
 out:
 	drm_dev_exit(idx);
 }
 
-static void gud_fb_queue_damage(struct gud_device *gdrm, struct drm_framebuffer *fb,
-				struct drm_rect *damage)
+static int gud_fb_queue_damage(struct gud_device *gdrm, struct drm_framebuffer *fb,
+			       const struct iosys_map *src, struct drm_rect *damage)
 {
 	struct drm_framebuffer *old_fb = NULL;
+	struct iosys_map shadow_map;
 
 	mutex_lock(&gdrm->damage_lock);
 
+	if (!gdrm->shadow_buf) {
+		gdrm->shadow_buf = vzalloc(fb->pitches[0] * fb->height);
+		if (!gdrm->shadow_buf) {
+			mutex_unlock(&gdrm->damage_lock);
+			return -ENOMEM;
+		}
+	}
+
+	iosys_map_set_vaddr(&shadow_map, gdrm->shadow_buf);
+	iosys_map_incr(&shadow_map, drm_fb_clip_offset(fb->pitches[0], fb->format, damage));
+	drm_fb_memcpy(&shadow_map, fb->pitches, src, fb, damage);
+
 	if (fb != gdrm->fb) {
 		old_fb = gdrm->fb;
 		drm_framebuffer_get(fb);
 		gdrm->fb = fb;
 	}
 
-	gud_add_damage(gdrm, damage);
+	gdrm->damage.x1 = min(gdrm->damage.x1, damage->x1);
+	gdrm->damage.y1 = min(gdrm->damage.y1, damage->y1);
+	gdrm->damage.x2 = max(gdrm->damage.x2, damage->x2);
+	gdrm->damage.y2 = max(gdrm->damage.y2, damage->y2);
 
 	mutex_unlock(&gdrm->damage_lock);
 
@@ -447,6 +418,26 @@ static void gud_fb_queue_damage(struct gud_device *gdrm, struct drm_framebuffer
 
 	if (old_fb)
 		drm_framebuffer_put(old_fb);
+
+	return 0;
+}
+
+static void gud_fb_handle_damage(struct gud_device *gdrm, struct drm_framebuffer *fb,
+				 const struct iosys_map *src, struct drm_rect *damage)
+{
+	int ret;
+
+	if (gdrm->flags & GUD_DISPLAY_FLAG_FULL_UPDATE)
+		drm_rect_init(damage, 0, 0, fb->width, fb->height);
+
+	if (gud_async_flush) {
+		ret = gud_fb_queue_damage(gdrm, fb, src, damage);
+		if (ret != -ENOMEM)
+			return;
+	}
+
+	/* Imported buffers are assumed to be WriteCombined with uncached reads */
+	gud_flush_damage(gdrm, fb, src, !fb->obj[0]->import_attach, damage);
 }
 
 int gud_pipe_check(struct drm_simple_display_pipe *pipe,
@@ -571,10 +562,11 @@ void gud_pipe_update(struct drm_simple_display_pipe *pipe,
 	struct drm_device *drm = pipe->crtc.dev;
 	struct gud_device *gdrm = to_gud_device(drm);
 	struct drm_plane_state *state = pipe->plane.state;
+	struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
 	struct drm_framebuffer *fb = state->fb;
 	struct drm_crtc *crtc = &pipe->crtc;
 	struct drm_rect damage;
-	int idx;
+	int ret, idx;
 
 	if (crtc->state->mode_changed || !crtc->state->enable) {
 		cancel_work_sync(&gdrm->work);
@@ -584,6 +576,8 @@ void gud_pipe_update(struct drm_simple_display_pipe *pipe,
 			gdrm->fb = NULL;
 		}
 		gud_clear_damage(gdrm);
+		vfree(gdrm->shadow_buf);
+		gdrm->shadow_buf = NULL;
 		mutex_unlock(&gdrm->damage_lock);
 	}
 
@@ -599,14 +593,19 @@ void gud_pipe_update(struct drm_simple_display_pipe *pipe,
 	if (crtc->state->active_changed)
 		gud_usb_set_u8(gdrm, GUD_REQ_SET_DISPLAY_ENABLE, crtc->state->active);
 
-	if (drm_atomic_helper_damage_merged(old_state, state, &damage)) {
-		if (gdrm->flags & GUD_DISPLAY_FLAG_FULL_UPDATE)
-			drm_rect_init(&damage, 0, 0, fb->width, fb->height);
-		gud_fb_queue_damage(gdrm, fb, &damage);
-		if (!gud_async_flush)
-			flush_work(&gdrm->work);
-	}
+	if (!fb)
+		goto ctrl_disable;
+
+	ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
+	if (ret)
+		goto ctrl_disable;
+
+	if (drm_atomic_helper_damage_merged(old_state, state, &damage))
+		gud_fb_handle_damage(gdrm, fb, &shadow_plane_state->data[0], &damage);
+
+	drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
 
+ctrl_disable:
 	if (!crtc->state->enable)
 		gud_usb_set_u8(gdrm, GUD_REQ_SET_CONTROLLER_ENABLE, 0);
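The change set above boils down to one pattern: let the simple-display-pipe shadow-plane helpers vmap the framebuffer around each atomic commit, and have the update hook copy the merged damage out of that mapping (into gud's new shadow_buf) before flushing over USB. The condensed sketch below shows just that wiring in isolation; it is not part of the patch, the demo_* names are made up, and only DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS, to_drm_shadow_plane_state() and drm_atomic_helper_damage_merged() are the real helpers the patch relies on.

/* Minimal sketch for a hypothetical "demo" driver; not part of the patch. */
#include <drm/drm_damage_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_simple_kms_helper.h>
#include <linux/iosys-map.h>

/* Hypothetical sink for the damaged region; gud copies it into gdrm->shadow_buf. */
static void demo_handle_damage(struct drm_framebuffer *fb, const struct iosys_map *src,
			       struct drm_rect *damage)
{
}

static void demo_pipe_update(struct drm_simple_display_pipe *pipe,
			     struct drm_plane_state *old_state)
{
	struct drm_plane_state *state = pipe->plane.state;
	/* The shadow-plane helpers vmap the GEM object around the commit, so
	 * data[0] is always a CPU-accessible view of the framebuffer here. */
	struct drm_shadow_plane_state *shadow = to_drm_shadow_plane_state(state);
	struct drm_rect damage;

	if (!state->fb)
		return;

	if (drm_atomic_helper_damage_merged(old_state, state, &damage))
		demo_handle_damage(state->fb, &shadow->data[0], &damage);
}

static const struct drm_simple_display_pipe_funcs demo_pipe_funcs = {
	.update = demo_pipe_update,
	/* Fills in prepare_fb/cleanup_fb and the plane-state hooks for the
	 * vmapped shadow-plane path, as the gud_drv.c hunk does. */
	DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS
};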