author     | Liu Ying <gnuiyl@gmail.com>            | 2016-07-08 11:40:59 +0200
committer  | Philipp Zabel <p.zabel@pengutronix.de> | 2016-07-12 18:24:03 +0200
commit     | 5f2f911578fb13b0110e125d43775f08cf1dd281 (patch)
tree       | 06cb1b02843dd9c874504e143af14d2546c2a985
parent     | drm/imx: Remove encoders' ->prepare callbacks (diff)
download   | linux-5f2f911578fb13b0110e125d43775f08cf1dd281.tar.xz
           | linux-5f2f911578fb13b0110e125d43775f08cf1dd281.zip
drm/imx: atomic phase 3 step 1: Use atomic configuration
Replacing drm_crtc_helper_set_config() with drm_atomic_helper_set_config()
and converting the suspend/resume operations to atomic allows us to use
atomic configurations. This in turn lets us remove the crtc_funcs->mode_set
callback, as it is no longer used. Also, change the
plane_funcs->update/disable_plane callbacks from the transitional versions
to the atomic versions. Furthermore, switching to the pure atomic version
of the set_config callback means that the CRTC/plane atomic checks can be
implemented against the new CRTC/plane states instead of the legacy ones,
and that the private ipu_crtc->enabled state, which was only kept for the
transitional atomic helpers in phase 1, can be removed. Page flip is also
switched to the atomic version. Finally, the legacy function
drm_helper_disable_unused_functions() is removed from ->load so that it
does not confuse the atomic driver.
Signed-off-by: Liu Ying <gnuiyl@gmail.com>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Philipp Zabel <p.zabel@pengutronix.de>
-rw-r--r-- | drivers/gpu/drm/imx/imx-drm-core.c |  76
-rw-r--r-- | drivers/gpu/drm/imx/ipuv3-crtc.c   | 209
-rw-r--r-- | drivers/gpu/drm/imx/ipuv3-plane.c  | 135
-rw-r--r-- | drivers/gpu/drm/imx/ipuv3-plane.h  |   2
4 files changed, 114 insertions(+), 308 deletions(-)
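Before the full diff, here is a condensed sketch of the suspend/resume
conversion described in the commit message, assuming the driver-private
imx_drm_device structure gains a drm_atomic_state pointer named "state"
(as it does in this patch); the NULL check for a not-yet-loaded drm_dev is
omitted for brevity:

/*
 * Condensed suspend/resume path after the atomic conversion.  The saved
 * state pointer lives in the driver-private imx_drm_device structure.
 */
static int imx_drm_suspend(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct imx_drm_device *imxdrm = drm_dev->dev_private;

	drm_kms_helper_poll_disable(drm_dev);

	/* Disable everything and remember the configuration for resume. */
	imxdrm->state = drm_atomic_helper_suspend(drm_dev);
	if (IS_ERR(imxdrm->state)) {
		drm_kms_helper_poll_enable(drm_dev);
		return PTR_ERR(imxdrm->state);
	}

	return 0;
}

static int imx_drm_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct imx_drm_device *imxdrm = drm_dev->dev_private;

	/* Restore the saved configuration through a regular atomic commit. */
	drm_atomic_helper_resume(drm_dev, imxdrm->state);
	drm_kms_helper_poll_enable(drm_dev);

	return 0;
}

drm_atomic_helper_suspend() duplicates the current state and disables all
outputs; drm_atomic_helper_resume() replays that saved state through a
normal atomic commit, which is why the old drm_helper_resume_force_mode()
call is no longer needed.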
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index f6e44c220874..f14ad2bbc1d7 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c @@ -15,10 +15,14 @@ */ #include <linux/component.h> #include <linux/device.h> +#include <linux/dma-buf.h> #include <linux/fb.h> #include <linux/module.h> #include <linux/platform_device.h> +#include <linux/reservation.h> #include <drm/drmP.h> +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_fb_helper.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_gem_cma_helper.h> @@ -41,6 +45,7 @@ struct imx_drm_device { struct imx_drm_crtc *crtc[MAX_CRTC]; unsigned int pipes; struct drm_fbdev_cma *fbhelper; + struct drm_atomic_state *state; }; struct imx_drm_crtc { @@ -169,6 +174,63 @@ static void imx_drm_output_poll_changed(struct drm_device *drm) static const struct drm_mode_config_funcs imx_drm_mode_config_funcs = { .fb_create = drm_fb_cma_create, .output_poll_changed = imx_drm_output_poll_changed, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + +static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state) +{ + struct drm_device *dev = state->dev; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + struct drm_plane_state *plane_state; + struct drm_gem_cma_object *cma_obj; + struct fence *excl; + unsigned shared_count; + struct fence **shared; + unsigned int i, j; + int ret; + + /* Wait for fences. */ + for_each_crtc_in_state(state, crtc, crtc_state, i) { + plane_state = crtc->primary->state; + if (plane_state->fb) { + cma_obj = drm_fb_cma_get_gem_obj(plane_state->fb, 0); + if (cma_obj->base.dma_buf) { + ret = reservation_object_get_fences_rcu( + cma_obj->base.dma_buf->resv, &excl, + &shared_count, &shared); + if (unlikely(ret)) + DRM_ERROR("failed to get fences " + "for buffer\n"); + + if (excl) { + fence_wait(excl, false); + fence_put(excl); + } + for (j = 0; j < shared_count; i++) { + fence_wait(shared[j], false); + fence_put(shared[j]); + } + } + } + } + + drm_atomic_helper_commit_modeset_disables(dev, state); + + drm_atomic_helper_commit_planes(dev, state, true); + + drm_atomic_helper_commit_modeset_enables(dev, state); + + drm_atomic_helper_commit_hw_done(state); + + drm_atomic_helper_wait_for_vblanks(dev, state); + + drm_atomic_helper_cleanup_planes(dev, state); +} + +static struct drm_mode_config_helper_funcs imx_drm_mode_config_helpers = { + .atomic_commit_tail = imx_drm_atomic_commit_tail, }; /* @@ -210,6 +272,7 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags) drm->mode_config.max_width = 4096; drm->mode_config.max_height = 4096; drm->mode_config.funcs = &imx_drm_mode_config_funcs; + drm->mode_config.helper_private = &imx_drm_mode_config_helpers; drm_mode_config_init(drm); @@ -252,7 +315,6 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags) dev_warn(drm->dev, "Invalid legacyfb_depth. 
Defaulting to 16bpp\n"); legacyfb_depth = 16; } - drm_helper_disable_unused_functions(drm); imxdrm->fbhelper = drm_fbdev_cma_init(drm, legacyfb_depth, drm->mode_config.num_crtc, MAX_CRTC); if (IS_ERR(imxdrm->fbhelper)) { @@ -454,6 +516,7 @@ static int imx_drm_platform_remove(struct platform_device *pdev) static int imx_drm_suspend(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); + struct imx_drm_device *imxdrm; /* The drm_dev is NULL before .load hook is called */ if (drm_dev == NULL) @@ -461,17 +524,26 @@ static int imx_drm_suspend(struct device *dev) drm_kms_helper_poll_disable(drm_dev); + imxdrm = drm_dev->dev_private; + imxdrm->state = drm_atomic_helper_suspend(drm_dev); + if (IS_ERR(imxdrm->state)) { + drm_kms_helper_poll_enable(drm_dev); + return PTR_ERR(imxdrm->state); + } + return 0; } static int imx_drm_resume(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); + struct imx_drm_device *imx_drm; if (drm_dev == NULL) return 0; - drm_helper_resume_force_mode(drm_dev); + imx_drm = drm_dev->dev_private; + drm_atomic_helper_resume(drm_dev, imx_drm->state); drm_kms_helper_poll_enable(drm_dev); return 0; diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index f9d5d7c5cd79..3e8253455121 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c @@ -24,8 +24,6 @@ #include <linux/fb.h> #include <linux/clk.h> #include <linux/errno.h> -#include <linux/reservation.h> -#include <linux/dma-buf.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_fb_cma_helper.h> @@ -35,23 +33,6 @@ #define DRIVER_DESC "i.MX IPUv3 Graphics" -enum ipu_flip_status { - IPU_FLIP_NONE, - IPU_FLIP_PENDING, - IPU_FLIP_SUBMITTED, -}; - -struct ipu_flip_work { - struct work_struct unref_work; - struct drm_gem_object *bo; - struct drm_pending_vblank_event *page_flip_event; - struct work_struct fence_work; - struct ipu_crtc *crtc; - struct fence *excl; - unsigned shared_count; - struct fence **shared; -}; - struct ipu_crtc { struct device *dev; struct drm_crtc base; @@ -62,10 +43,6 @@ struct ipu_crtc { struct ipu_dc *dc; struct ipu_di *di; - int enabled; - enum ipu_flip_status flip_state; - struct workqueue_struct *flip_queue; - struct ipu_flip_work *flip_work; int irq; }; @@ -75,34 +52,26 @@ static void ipu_crtc_enable(struct ipu_crtc *ipu_crtc) { struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent); - if (ipu_crtc->enabled) - return; - ipu_dc_enable(ipu); ipu_dc_enable_channel(ipu_crtc->dc); ipu_di_enable(ipu_crtc->di); - ipu_crtc->enabled = 1; - - /* - * In order not to be warned on enabling vblank failure, - * we should call drm_crtc_vblank_on() after ->enabled is set to 1. 
- */ - drm_crtc_vblank_on(&ipu_crtc->base); } static void ipu_crtc_disable(struct ipu_crtc *ipu_crtc) { struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent); - - if (!ipu_crtc->enabled) - return; + struct drm_crtc *crtc = &ipu_crtc->base; ipu_dc_disable_channel(ipu_crtc->dc); ipu_di_disable(ipu_crtc->di); ipu_dc_disable(ipu); - ipu_crtc->enabled = 0; - drm_crtc_vblank_off(&ipu_crtc->base); + spin_lock_irq(&crtc->dev->event_lock); + if (crtc->state->event) { + drm_crtc_send_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; + } + spin_unlock_irq(&crtc->dev->event_lock); } static void ipu_crtc_dpms(struct drm_crtc *crtc, int mode) @@ -123,151 +92,21 @@ static void ipu_crtc_dpms(struct drm_crtc *crtc, int mode) } } -static void ipu_flip_unref_work_func(struct work_struct *__work) -{ - struct ipu_flip_work *work = - container_of(__work, struct ipu_flip_work, unref_work); - - drm_gem_object_unreference_unlocked(work->bo); - kfree(work); -} - -static void ipu_flip_fence_work_func(struct work_struct *__work) -{ - struct ipu_flip_work *work = - container_of(__work, struct ipu_flip_work, fence_work); - int i; - - /* wait for all fences attached to the FB obj to signal */ - if (work->excl) { - fence_wait(work->excl, false); - fence_put(work->excl); - } - for (i = 0; i < work->shared_count; i++) { - fence_wait(work->shared[i], false); - fence_put(work->shared[i]); - } - - work->crtc->flip_state = IPU_FLIP_SUBMITTED; -} - -static int ipu_page_flip(struct drm_crtc *crtc, - struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event, - uint32_t page_flip_flags) -{ - struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0); - struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); - struct ipu_flip_work *flip_work; - int ret; - - if (ipu_crtc->flip_state != IPU_FLIP_NONE) - return -EBUSY; - - ret = imx_drm_crtc_vblank_get(ipu_crtc->imx_crtc); - if (ret) { - dev_dbg(ipu_crtc->dev, "failed to acquire vblank counter\n"); - list_del(&event->base.link); - - return ret; - } - - flip_work = kzalloc(sizeof *flip_work, GFP_KERNEL); - if (!flip_work) { - ret = -ENOMEM; - goto put_vblank; - } - INIT_WORK(&flip_work->unref_work, ipu_flip_unref_work_func); - flip_work->page_flip_event = event; - - /* get BO backing the old framebuffer and take a reference */ - flip_work->bo = &drm_fb_cma_get_gem_obj(crtc->primary->fb, 0)->base; - drm_gem_object_reference(flip_work->bo); - - ipu_crtc->flip_work = flip_work; - /* - * If the object has a DMABUF attached, we need to wait on its fences - * if there are any. 
- */ - if (cma_obj->base.dma_buf) { - INIT_WORK(&flip_work->fence_work, ipu_flip_fence_work_func); - flip_work->crtc = ipu_crtc; - - ret = reservation_object_get_fences_rcu( - cma_obj->base.dma_buf->resv, &flip_work->excl, - &flip_work->shared_count, &flip_work->shared); - - if (unlikely(ret)) { - DRM_ERROR("failed to get fences for buffer\n"); - goto free_flip_work; - } - - /* No need to queue the worker if the are no fences */ - if (!flip_work->excl && !flip_work->shared_count) { - ipu_crtc->flip_state = IPU_FLIP_SUBMITTED; - } else { - ipu_crtc->flip_state = IPU_FLIP_PENDING; - queue_work(ipu_crtc->flip_queue, - &flip_work->fence_work); - } - } else { - ipu_crtc->flip_state = IPU_FLIP_SUBMITTED; - } - - if (crtc->primary->state) - drm_atomic_set_fb_for_plane(crtc->primary->state, fb); - - return 0; - -free_flip_work: - drm_gem_object_unreference_unlocked(flip_work->bo); - kfree(flip_work); - ipu_crtc->flip_work = NULL; -put_vblank: - imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc); - - return ret; -} - static const struct drm_crtc_funcs ipu_crtc_funcs = { - .set_config = drm_crtc_helper_set_config, + .set_config = drm_atomic_helper_set_config, .destroy = drm_crtc_cleanup, - .page_flip = ipu_page_flip, + .page_flip = drm_atomic_helper_page_flip, .reset = drm_atomic_helper_crtc_reset, .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, }; -static void ipu_crtc_handle_pageflip(struct ipu_crtc *ipu_crtc) -{ - unsigned long flags; - struct drm_device *drm = ipu_crtc->base.dev; - struct ipu_flip_work *work = ipu_crtc->flip_work; - - spin_lock_irqsave(&drm->event_lock, flags); - if (work->page_flip_event) - drm_crtc_send_vblank_event(&ipu_crtc->base, - work->page_flip_event); - imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc); - spin_unlock_irqrestore(&drm->event_lock, flags); -} - static irqreturn_t ipu_irq_handler(int irq, void *dev_id) { struct ipu_crtc *ipu_crtc = dev_id; imx_drm_handle_vblank(ipu_crtc->imx_crtc); - if (ipu_crtc->flip_state == IPU_FLIP_SUBMITTED) { - struct ipu_plane *plane = ipu_crtc->plane[0]; - - ipu_plane_set_base(plane, ipu_crtc->base.primary->fb); - ipu_crtc_handle_pageflip(ipu_crtc); - queue_work(ipu_crtc->flip_queue, - &ipu_crtc->flip_work->unref_work); - ipu_crtc->flip_state = IPU_FLIP_NONE; - } - return IRQ_HANDLED; } @@ -310,9 +149,26 @@ static void ipu_crtc_commit(struct drm_crtc *crtc) static int ipu_crtc_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state) { + u32 primary_plane_mask = 1 << drm_plane_index(crtc->primary); + + if (state->active && (primary_plane_mask & state->plane_mask) == 0) + return -EINVAL; + return 0; } +static void ipu_crtc_atomic_begin(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) +{ + spin_lock_irq(&crtc->dev->event_lock); + if (crtc->state->event) { + WARN_ON(drm_crtc_vblank_get(crtc)); + drm_crtc_arm_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; + } + spin_unlock_irq(&crtc->dev->event_lock); +} + static void ipu_crtc_mode_set_nofb(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; @@ -371,25 +227,17 @@ static void ipu_crtc_mode_set_nofb(struct drm_crtc *crtc) static const struct drm_crtc_helper_funcs ipu_helper_funcs = { .dpms = ipu_crtc_dpms, .mode_fixup = ipu_crtc_mode_fixup, - .mode_set = drm_helper_crtc_mode_set, .mode_set_nofb = ipu_crtc_mode_set_nofb, .prepare = ipu_crtc_prepare, .commit = ipu_crtc_commit, .atomic_check = ipu_crtc_atomic_check, + .atomic_begin = ipu_crtc_atomic_begin, }; static int 
ipu_enable_vblank(struct drm_crtc *crtc) { struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); - /* - * ->commit is done after ->mode_set in drm_crtc_helper_set_mode(), - * so waiting for vblank in drm_plane_helper_commit() will timeout. - * Check the state here to avoid the waiting. - */ - if (!ipu_crtc->enabled) - return -EINVAL; - enable_irq(ipu_crtc->irq); return 0; @@ -508,8 +356,6 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc, /* Only enable IRQ when we actually need it to trigger work. */ disable_irq(ipu_crtc->irq); - ipu_crtc->flip_queue = create_singlethread_workqueue("ipu-crtc-flip"); - return 0; err_put_plane1_res: @@ -554,7 +400,6 @@ static void ipu_drm_unbind(struct device *dev, struct device *master, imx_drm_remove_crtc(ipu_crtc->imx_crtc); - destroy_workqueue(ipu_crtc->flip_queue); ipu_put_resources(ipu_crtc); if (ipu_crtc->plane[1]) ipu_plane_put_resources(ipu_crtc->plane[1]); diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c index afbc7402bff1..3f5f9566b152 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3-plane.c @@ -14,6 +14,7 @@ */ #include <drm/drmP.h> +#include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_fb_cma_helper.h> #include <drm/drm_gem_cma_helper.h> @@ -55,122 +56,6 @@ int ipu_plane_irq(struct ipu_plane *ipu_plane) IPU_IRQ_EOF); } -int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb) -{ - struct drm_gem_cma_object *cma_obj[3], *old_cma_obj[3]; - struct drm_plane_state *state = ipu_plane->base.state; - struct drm_framebuffer *old_fb = state->fb; - unsigned long eba, ubo, vbo, old_eba, old_ubo, old_vbo; - int active, i; - int x = state->src_x >> 16; - int y = state->src_y >> 16; - - for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) { - cma_obj[i] = drm_fb_cma_get_gem_obj(fb, i); - if (!cma_obj[i]) { - DRM_DEBUG_KMS("plane %d entry is null.\n", i); - return -EFAULT; - } - } - - for (i = 0; i < drm_format_num_planes(old_fb->pixel_format); i++) { - old_cma_obj[i] = drm_fb_cma_get_gem_obj(old_fb, i); - if (!old_cma_obj[i]) { - DRM_DEBUG_KMS("plane %d entry is null.\n", i); - return -EFAULT; - } - } - - eba = cma_obj[0]->paddr + fb->offsets[0] + - fb->pitches[0] * y + (fb->bits_per_pixel >> 3) * x; - - if (eba & 0x7) { - DRM_DEBUG_KMS("base address must be a multiple of 8.\n"); - return -EINVAL; - } - - if (fb->pitches[0] < 1 || fb->pitches[0] > 16384) { - DRM_DEBUG_KMS("pitches out of range.\n"); - return -EINVAL; - } - - if (fb->pitches[0] != old_fb->pitches[0]) { - DRM_DEBUG_KMS("pitches must not change while plane is enabled.\n"); - return -EINVAL; - } - - switch (fb->pixel_format) { - case DRM_FORMAT_YUV420: - case DRM_FORMAT_YVU420: - /* - * Multiplanar formats have to meet the following restrictions: - * - The (up to) three plane addresses are EBA, EBA+UBO, EBA+VBO - * - EBA, UBO and VBO are a multiple of 8 - * - UBO and VBO are unsigned and not larger than 0xfffff8 - * - Only EBA may be changed while scanout is active - * - The strides of U and V planes must be identical. 
- */ - ubo = cma_obj[1]->paddr + fb->offsets[1] + - fb->pitches[1] * y / 2 + x / 2 - eba; - vbo = cma_obj[2]->paddr + fb->offsets[2] + - fb->pitches[2] * y / 2 + x / 2 - eba; - - old_eba = old_cma_obj[0]->paddr + old_fb->offsets[0] + - old_fb->pitches[0] * y + - (old_fb->bits_per_pixel >> 3) * x; - old_ubo = old_cma_obj[1]->paddr + old_fb->offsets[1] + - old_fb->pitches[1] * y / 2 + x / 2 - old_eba; - old_vbo = old_cma_obj[2]->paddr + old_fb->offsets[2] + - old_fb->pitches[2] * y / 2 + x / 2 - old_eba; - - if ((ubo & 0x7) || (vbo & 0x7)) { - DRM_DEBUG_KMS("U/V buffer offsets must be a multiple of 8.\n"); - return -EINVAL; - } - - if ((ubo > 0xfffff8) || (vbo > 0xfffff8)) { - DRM_DEBUG_KMS("U/V buffer offsets must be positive and not larger than 0xfffff8.\n"); - return -EINVAL; - } - - if (old_ubo != ubo || old_vbo != vbo) { - DRM_DEBUG_KMS("U/V buffer offsets must not change while plane is enabled.\n"); - return -EINVAL; - } - - if (fb->pitches[1] != fb->pitches[2]) { - DRM_DEBUG_KMS("U/V pitches must be identical.\n"); - return -EINVAL; - } - - if (fb->pitches[1] < 1 || fb->pitches[1] > 16384) { - DRM_DEBUG_KMS("U/V pitches out of range.\n"); - return -EINVAL; - } - - if (old_fb->pitches[1] != fb->pitches[1]) { - DRM_DEBUG_KMS("U/V pitches must not change while plane is enabled.\n"); - return -EINVAL; - } - - dev_dbg(ipu_plane->base.dev->dev, - "phys = %pad %pad %pad, x = %d, y = %d", - &cma_obj[0]->paddr, &cma_obj[1]->paddr, - &cma_obj[2]->paddr, x, y); - break; - default: - dev_dbg(ipu_plane->base.dev->dev, "phys = %pad, x = %d, y = %d", - &cma_obj[0]->paddr, x, y); - break; - } - - active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch); - ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba); - ipu_idmac_select_buffer(ipu_plane->ipu_ch, !active); - - return 0; -} - static inline unsigned long drm_plane_state_to_eba(struct drm_plane_state *state) { @@ -360,8 +245,8 @@ static void ipu_plane_destroy(struct drm_plane *plane) } static const struct drm_plane_funcs ipu_plane_funcs = { - .update_plane = drm_plane_helper_update, - .disable_plane = drm_plane_helper_disable, + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, .destroy = ipu_plane_destroy, .reset = drm_atomic_helper_plane_reset, .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, @@ -380,10 +265,18 @@ static int ipu_plane_atomic_check(struct drm_plane *plane, /* Ok to disable */ if (!fb) - return old_fb ? 
0 : -EINVAL; + return 0; + + if (!state->crtc) + return -EINVAL; + + crtc_state = + drm_atomic_get_existing_crtc_state(state->state, state->crtc); + if (WARN_ON(!crtc_state)) + return -EINVAL; /* CRTC should be enabled */ - if (!state->crtc->enabled) + if (!crtc_state->enable) return -EINVAL; /* no scaling */ @@ -391,8 +284,6 @@ static int ipu_plane_atomic_check(struct drm_plane *plane, state->src_h >> 16 != state->crtc_h) return -EINVAL; - crtc_state = state->crtc->state; - switch (plane->type) { case DRM_PLANE_TYPE_PRIMARY: /* full plane doesn't support partial off screen */ diff --git a/drivers/gpu/drm/imx/ipuv3-plane.h b/drivers/gpu/drm/imx/ipuv3-plane.h index c51a44ba3ddd..338b88a74eb6 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.h +++ b/drivers/gpu/drm/imx/ipuv3-plane.h @@ -37,8 +37,6 @@ int ipu_plane_mode_set(struct ipu_plane *plane, struct drm_crtc *crtc, uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h, bool interlaced); -int ipu_plane_set_base(struct ipu_plane *plane, struct drm_framebuffer *fb); - int ipu_plane_get_resources(struct ipu_plane *plane); void ipu_plane_put_resources(struct ipu_plane *plane); |
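To summarize the entry-point changes spread through the diff above: the
legacy CRTC and plane callbacks now point at the generic atomic helpers,
which is what makes the driver-specific page-flip machinery and
ipu_plane_set_base() removable. The abridged tables below repeat only the
callbacks visible in this patch:

static const struct drm_crtc_funcs ipu_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = drm_crtc_cleanup,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};

static const struct drm_plane_funcs ipu_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = ipu_plane_destroy,
	.reset = drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	/* remaining plane callbacks are unchanged by this patch */
};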