author    | Dave Airlie <airlied@redhat.com> | 2016-10-04 04:43:31 +0200
committer | Dave Airlie <airlied@redhat.com> | 2016-10-04 04:43:31 +0200
commit    | f5dce66593ffb76d750918d6f1962fd62fb54d64 (patch)
tree      | 05e79a3257a9d05728bca2592ba07c073724ea68
parent    | Merge branch 'drm-next-4.9' of git://people.freedesktop.org/~agd5f/linux into... (diff)
parent    | drm/exynos: g2d: simplify g2d_free_runqueue_node() (diff)
download  | linux-f5dce66593ffb76d750918d6f1962fd62fb54d64.tar.xz linux-f5dce66593ffb76d750918d6f1962fd62fb54d64.zip
Merge branch 'exynos-drm-next' of git://git.kernel.org/pub/scm/linux/kernel/git/daeinki/drm-exynos into drm-next
This pull request includes:
- Code refactoring of the HDMI DDC and PHY handling.
- A fix for a deadlock regression in the G2D PM integration.
- A fix for a page fault issue in the Exynos-specific wait_for_vblank mechanism.
- Some cleanups.
* 'exynos-drm-next' of git://git.kernel.org/pub/scm/linux/kernel/git/daeinki/drm-exynos:
drm/exynos: g2d: simplify g2d_free_runqueue_node()
drm/exynos: g2d: use autosuspend mode for PM runtime
drm/exynos: g2d: wait for engine to finish
drm/exynos: g2d: remove runqueue nodes in g2d_{close,remove}()
drm/exynos: g2d: move PM management to runqueue worker
Revert "drm/exynos: g2d: fix system and runtime pm integration"
drm/exynos: use drm core to handle page-flip event
drm/exynos: mark exynos_dp_crtc_clock_enable() static
drm/exynos/fimd: add clock rate checking
drm/exynos: fix pending update handling
drm/exynos/vidi: use timer for vblanks instead of sleeping worker
drm/exynos: g2d: beautify probing message
drm/exynos: mixer: simplify loop in vp_win_reset()
drm/exynos: mixer: convert booleans to flags in mixer context
gpu: drm: exynos_hdmi: Remove duplicate initialization of regulator bulk consumer
gpu: drm: exynos_hdmi: Move PHY logic into single function
gpu: drm: exynos_hdmi: Move DDC logic into single function
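For context on the "use drm core to handle page-flip event" change: the driver drops its private pending_update/wait_update machinery and relies on the standard DRM vblank-event helpers instead. The sketch below is condensed from the exynos_drm_crtc.c hunk in the diff further down (driver callbacks and unrelated code elided); it is an illustrative excerpt, not the complete function.

```c
static void exynos_crtc_atomic_flush(struct drm_crtc *crtc,
				     struct drm_crtc_state *old_crtc_state)
{
	struct drm_pending_vblank_event *event;
	unsigned long flags;

	/* ... device-specific flush via exynos_crtc->ops->atomic_flush() ... */

	/* The atomic core stores the pending page-flip event in crtc->state->event. */
	event = crtc->state->event;
	if (event) {
		crtc->state->event = NULL;

		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		if (drm_crtc_vblank_get(crtc) == 0)
			/* Deliver the event on the next vblank. */
			drm_crtc_arm_vblank_event(crtc, event);
		else
			/* Vblanks unavailable: send the event immediately. */
			drm_crtc_send_vblank_event(crtc, event);
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	}
}
```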
-rw-r--r-- | drivers/gpu/drm/exynos/exynos5433_drm_decon.c |  11
-rw-r--r-- | drivers/gpu/drm/exynos/exynos7_drm_decon.c    |   9
-rw-r--r-- | drivers/gpu/drm/exynos/exynos_dp.c            |   2
-rw-r--r-- | drivers/gpu/drm/exynos/exynos_drm_crtc.c      |  58
-rw-r--r-- | drivers/gpu/drm/exynos/exynos_drm_drv.c       |  44
-rw-r--r-- | drivers/gpu/drm/exynos/exynos_drm_drv.h       |   4
-rw-r--r-- | drivers/gpu/drm/exynos/exynos_drm_fimd.c      |  54
-rw-r--r-- | drivers/gpu/drm/exynos/exynos_drm_g2d.c       | 239
-rw-r--r-- | drivers/gpu/drm/exynos/exynos_drm_plane.c     |   1
-rw-r--r-- | drivers/gpu/drm/exynos/exynos_drm_vidi.c      |  76
-rw-r--r-- | drivers/gpu/drm/exynos/exynos_hdmi.c          | 112
-rw-r--r-- | drivers/gpu/drm/exynos/exynos_mixer.c         |  68
12 files changed, 352 insertions(+), 326 deletions(-)
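Similarly, the G2D runtime-PM rework replaces the per-commandlist get/put pair with autosuspend. The fragment below is condensed from the exynos_drm_g2d.c hunks in the diff that follows (runqueue bookkeeping and error handling omitted); it is a sketch of the flow, not a complete driver.

```c
/* probe: enable runtime PM with a 2 second autosuspend delay. */
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, 2000);
pm_runtime_enable(dev);

/* runqueue worker: take a PM reference before starting a node ... */
pm_runtime_get_sync(g2d->dev);
g2d_dma_start(g2d, g2d->runqueue_node);

/* ... and release it via autosuspend once the node completes, so the
 * engine only powers down after the delay expires with no new work. */
pm_runtime_mark_last_busy(g2d->dev);
pm_runtime_put_autosuspend(g2d->dev);
```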
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index ac21b4000835..6ca1f3117fe8 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c @@ -551,7 +551,6 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id) { struct decon_context *ctx = dev_id; u32 val; - int win; if (!test_bit(BIT_CLKS_ENABLED, &ctx->flags)) goto out; @@ -560,16 +559,6 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id) val &= VIDINTCON1_INTFRMDONEPEND | VIDINTCON1_INTFRMPEND; if (val) { - for (win = ctx->first_win; win < WINDOWS_NR ; win++) { - struct exynos_drm_plane *plane = &ctx->planes[win]; - - if (!plane->pending_fb) - continue; - - exynos_drm_crtc_finish_update(ctx->crtc, plane); - } - - /* clear */ writel(val, ctx->addr + DECON_VIDINTCON1); drm_crtc_handle_vblank(&ctx->crtc->base); } diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index 7f9901b7777b..f4d5a2133777 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c @@ -603,7 +603,6 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id) { struct decon_context *ctx = (struct decon_context *)dev_id; u32 val, clear_bit; - int win; val = readl(ctx->regs + VIDINTCON1); @@ -617,14 +616,6 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id) if (!ctx->i80_if) { drm_crtc_handle_vblank(&ctx->crtc->base); - for (win = 0 ; win < WINDOWS_NR ; win++) { - struct exynos_drm_plane *plane = &ctx->planes[win]; - - if (!plane->pending_fb) - continue; - - exynos_drm_crtc_finish_update(ctx->crtc, plane); - } /* set wait vsync event to zero and wake up queue. */ if (atomic_read(&ctx->wait_vsync_event)) { diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c index 4f0850585b8e..528229faffe4 100644 --- a/drivers/gpu/drm/exynos/exynos_dp.c +++ b/drivers/gpu/drm/exynos/exynos_dp.c @@ -43,7 +43,7 @@ struct exynos_dp_device { struct analogix_dp_plat_data plat_data; }; -int exynos_dp_crtc_clock_enable(struct analogix_dp_plat_data *plat_data, +static int exynos_dp_crtc_clock_enable(struct analogix_dp_plat_data *plat_data, bool enable) { struct exynos_dp_device *dp = to_dp(plat_data); diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index 785ffa6cc309..2530bf57716a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -69,8 +69,6 @@ static void exynos_crtc_atomic_begin(struct drm_crtc *crtc, { struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); - exynos_crtc->event = crtc->state->event; - if (exynos_crtc->ops->atomic_begin) exynos_crtc->ops->atomic_begin(exynos_crtc); } @@ -79,9 +77,24 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state) { struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); + struct drm_pending_vblank_event *event; + unsigned long flags; if (exynos_crtc->ops->atomic_flush) exynos_crtc->ops->atomic_flush(exynos_crtc); + + event = crtc->state->event; + if (event) { + crtc->state->event = NULL; + + spin_lock_irqsave(&crtc->dev->event_lock, flags); + if (drm_crtc_vblank_get(crtc) == 0) + drm_crtc_arm_vblank_event(crtc, event); + else + drm_crtc_send_vblank_event(crtc, event); + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + } + } static const struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = { @@ -134,8 +147,6 @@ struct 
exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev, exynos_crtc->ops = ops; exynos_crtc->ctx = ctx; - init_waitqueue_head(&exynos_crtc->wait_update); - crtc = &exynos_crtc->base; private->crtc[pipe] = crtc; @@ -175,32 +186,6 @@ void exynos_drm_crtc_disable_vblank(struct drm_device *dev, unsigned int pipe) exynos_crtc->ops->disable_vblank(exynos_crtc); } -void exynos_drm_crtc_wait_pending_update(struct exynos_drm_crtc *exynos_crtc) -{ - wait_event_timeout(exynos_crtc->wait_update, - (atomic_read(&exynos_crtc->pending_update) == 0), - msecs_to_jiffies(50)); -} - -void exynos_drm_crtc_finish_update(struct exynos_drm_crtc *exynos_crtc, - struct exynos_drm_plane *exynos_plane) -{ - struct drm_crtc *crtc = &exynos_crtc->base; - unsigned long flags; - - exynos_plane->pending_fb = NULL; - - if (atomic_dec_and_test(&exynos_crtc->pending_update)) - wake_up(&exynos_crtc->wait_update); - - spin_lock_irqsave(&crtc->dev->event_lock, flags); - if (exynos_crtc->event) - drm_crtc_send_vblank_event(crtc, exynos_crtc->event); - - exynos_crtc->event = NULL; - spin_unlock_irqrestore(&crtc->dev->event_lock, flags); -} - int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev, enum exynos_drm_output_type out_type) { @@ -228,20 +213,19 @@ void exynos_drm_crtc_te_handler(struct drm_crtc *crtc) void exynos_drm_crtc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file) { - struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); struct drm_pending_vblank_event *e; unsigned long flags; spin_lock_irqsave(&crtc->dev->event_lock, flags); - e = exynos_crtc->event; - if (e && e->base.file_priv == file) { - exynos_crtc->event = NULL; - atomic_dec(&exynos_crtc->pending_update); - } + e = crtc->state->event; + if (e && e->base.file_priv == file) + crtc->state->event = NULL; + else + e = NULL; spin_unlock_irqrestore(&crtc->dev->event_lock, flags); - if (e && e->base.file_priv == file) + if (e) drm_event_cancel_free(crtc->dev, &e->base); } diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 486943e70f70..def78c8c1780 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -45,37 +45,11 @@ struct exynos_atomic_commit { u32 crtcs; }; -static void exynos_atomic_wait_for_commit(struct drm_atomic_state *state) -{ - struct drm_crtc_state *crtc_state; - struct drm_crtc *crtc; - int i, ret; - - for_each_crtc_in_state(state, crtc, crtc_state, i) { - struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); - - if (!crtc->state->enable) - continue; - - ret = drm_crtc_vblank_get(crtc); - if (ret) - continue; - - exynos_drm_crtc_wait_pending_update(exynos_crtc); - drm_crtc_vblank_put(crtc); - } -} - static void exynos_atomic_commit_complete(struct exynos_atomic_commit *commit) { struct drm_device *dev = commit->dev; struct exynos_drm_private *priv = dev->dev_private; struct drm_atomic_state *state = commit->state; - struct drm_plane *plane; - struct drm_crtc *crtc; - struct drm_plane_state *plane_state; - struct drm_crtc_state *crtc_state; - int i; drm_atomic_helper_commit_modeset_disables(dev, state); @@ -89,25 +63,9 @@ static void exynos_atomic_commit_complete(struct exynos_atomic_commit *commit) * have the relevant clocks enabled to perform the update. 
*/ - for_each_crtc_in_state(state, crtc, crtc_state, i) { - struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); - - atomic_set(&exynos_crtc->pending_update, 0); - } - - for_each_plane_in_state(state, plane, plane_state, i) { - struct exynos_drm_crtc *exynos_crtc = - to_exynos_crtc(plane->crtc); - - if (!plane->crtc) - continue; - - atomic_inc(&exynos_crtc->pending_update); - } - drm_atomic_helper_commit_planes(dev, state, 0); - exynos_atomic_wait_for_commit(state); + drm_atomic_helper_wait_for_vblanks(dev, state); drm_atomic_helper_cleanup_planes(dev, state); diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index 7f1a49d5bdbe..d215149e737b 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h @@ -86,7 +86,6 @@ struct exynos_drm_plane { struct drm_plane base; const struct exynos_drm_plane_config *config; unsigned int index; - struct drm_framebuffer *pending_fb; }; #define EXYNOS_DRM_PLANE_CAP_DOUBLE (1 << 0) @@ -172,9 +171,6 @@ struct exynos_drm_crtc { struct drm_crtc base; enum exynos_drm_output_type type; unsigned int pipe; - struct drm_pending_vblank_event *event; - wait_queue_head_t wait_update; - atomic_t pending_update; const struct exynos_drm_crtc_ops *ops; void *ctx; struct exynos_drm_clk *pipe_clk; diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index d47216488985..e2e405170d35 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -198,6 +198,7 @@ struct fimd_context { atomic_t wait_vsync_event; atomic_t win_updated; atomic_t triggering; + u32 clkdiv; const struct fimd_driver_data *driver_data; struct drm_encoder *encoder; @@ -389,15 +390,18 @@ static void fimd_clear_channels(struct exynos_drm_crtc *crtc) pm_runtime_put(ctx->dev); } -static u32 fimd_calc_clkdiv(struct fimd_context *ctx, - const struct drm_display_mode *mode) + +static int fimd_atomic_check(struct exynos_drm_crtc *crtc, + struct drm_crtc_state *state) { - unsigned long ideal_clk; + struct drm_display_mode *mode = &state->adjusted_mode; + struct fimd_context *ctx = crtc->ctx; + unsigned long ideal_clk, lcd_rate; u32 clkdiv; if (mode->clock == 0) { - DRM_ERROR("Mode has zero clock value.\n"); - return 0xff; + DRM_INFO("Mode has zero clock value.\n"); + return -EINVAL; } ideal_clk = mode->clock * 1000; @@ -410,10 +414,23 @@ static u32 fimd_calc_clkdiv(struct fimd_context *ctx, ideal_clk *= 2; } + lcd_rate = clk_get_rate(ctx->lcd_clk); + if (2 * lcd_rate < ideal_clk) { + DRM_INFO("sclk_fimd clock too low(%lu) for requested pixel clock(%lu)\n", + lcd_rate, ideal_clk); + return -EINVAL; + } + /* Find the clock divider value that gets us closest to ideal_clk */ - clkdiv = DIV_ROUND_CLOSEST(clk_get_rate(ctx->lcd_clk), ideal_clk); + clkdiv = DIV_ROUND_CLOSEST(lcd_rate, ideal_clk); + if (clkdiv >= 0x200) { + DRM_INFO("requested pixel clock(%lu) too low\n", ideal_clk); + return -EINVAL; + } + + ctx->clkdiv = (clkdiv < 0x100) ? clkdiv : 0xff; - return (clkdiv < 0x100) ? 
clkdiv : 0xff; + return 0; } static void fimd_setup_trigger(struct fimd_context *ctx) @@ -442,7 +459,7 @@ static void fimd_commit(struct exynos_drm_crtc *crtc) struct drm_display_mode *mode = &crtc->base.state->adjusted_mode; const struct fimd_driver_data *driver_data = ctx->driver_data; void *timing_base = ctx->regs + driver_data->timing_base; - u32 val, clkdiv; + u32 val; if (ctx->suspended) return; @@ -543,9 +560,8 @@ static void fimd_commit(struct exynos_drm_crtc *crtc) if (ctx->driver_data->has_clksel) val |= VIDCON0_CLKSEL_LCD; - clkdiv = fimd_calc_clkdiv(ctx, mode); - if (clkdiv > 1) - val |= VIDCON0_CLKVAL_F(clkdiv - 1) | VIDCON0_CLKDIR; + if (ctx->clkdiv > 1) + val |= VIDCON0_CLKVAL_F(ctx->clkdiv - 1) | VIDCON0_CLKDIR; writel(val, ctx->regs + VIDCON0); } @@ -939,14 +955,14 @@ static const struct exynos_drm_crtc_ops fimd_crtc_ops = { .update_plane = fimd_update_plane, .disable_plane = fimd_disable_plane, .atomic_flush = fimd_atomic_flush, + .atomic_check = fimd_atomic_check, .te_handler = fimd_te_handler, }; static irqreturn_t fimd_irq_handler(int irq, void *dev_id) { struct fimd_context *ctx = (struct fimd_context *)dev_id; - u32 val, clear_bit, start, start_s; - int win; + u32 val, clear_bit; val = readl(ctx->regs + VIDINTCON1); @@ -961,18 +977,6 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id) if (!ctx->i80_if) drm_crtc_handle_vblank(&ctx->crtc->base); - for (win = 0 ; win < WINDOWS_NR ; win++) { - struct exynos_drm_plane *plane = &ctx->planes[win]; - - if (!plane->pending_fb) - continue; - - start = readl(ctx->regs + VIDWx_BUF_START(win, 0)); - start_s = readl(ctx->regs + VIDWx_BUF_START_S(win, 0)); - if (start == start_s) - exynos_drm_crtc_finish_update(ctx->crtc, plane); - } - if (ctx->i80_if) { /* Exits triggering mode */ atomic_set(&ctx->triggering, 0); diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 6eca8bb88648..aa92decf4233 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -138,6 +138,18 @@ enum g2d_reg_type { MAX_REG_TYPE_NR }; +enum g2d_flag_bits { + /* + * If set, suspends the runqueue worker after the currently + * processed node is finished. + */ + G2D_BIT_SUSPEND_RUNQUEUE, + /* + * If set, indicates that the engine is currently busy. 
+ */ + G2D_BIT_ENGINE_BUSY, +}; + /* cmdlist data structure */ struct g2d_cmdlist { u32 head; @@ -226,7 +238,7 @@ struct g2d_data { struct workqueue_struct *g2d_workq; struct work_struct runqueue_work; struct exynos_drm_subdrv subdrv; - bool suspended; + unsigned long flags; /* cmdlist */ struct g2d_cmdlist_node *cmdlist_node; @@ -246,6 +258,12 @@ struct g2d_data { unsigned long max_pool; }; +static inline void g2d_hw_reset(struct g2d_data *g2d) +{ + writel(G2D_R | G2D_SFRCLEAR, g2d->regs + G2D_SOFT_RESET); + clear_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags); +} + static int g2d_init_cmdlist(struct g2d_data *g2d) { struct device *dev = g2d->dev; @@ -803,12 +821,8 @@ static void g2d_dma_start(struct g2d_data *g2d, struct g2d_cmdlist_node *node = list_first_entry(&runqueue_node->run_cmdlist, struct g2d_cmdlist_node, list); - int ret; - - ret = pm_runtime_get_sync(g2d->dev); - if (ret < 0) - return; + set_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags); writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR); writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND); } @@ -831,9 +845,6 @@ static void g2d_free_runqueue_node(struct g2d_data *g2d, { struct g2d_cmdlist_node *node; - if (!runqueue_node) - return; - mutex_lock(&g2d->cmdlist_mutex); /* * commands in run_cmdlist have been completed so unmap all gem @@ -847,29 +858,65 @@ static void g2d_free_runqueue_node(struct g2d_data *g2d, kmem_cache_free(g2d->runqueue_slab, runqueue_node); } -static void g2d_exec_runqueue(struct g2d_data *g2d) +/** + * g2d_remove_runqueue_nodes - remove items from the list of runqueue nodes + * @g2d: G2D state object + * @file: if not zero, only remove items with this DRM file + * + * Has to be called under runqueue lock. + */ +static void g2d_remove_runqueue_nodes(struct g2d_data *g2d, struct drm_file* file) { - g2d->runqueue_node = g2d_get_runqueue_node(g2d); - if (g2d->runqueue_node) - g2d_dma_start(g2d, g2d->runqueue_node); + struct g2d_runqueue_node *node, *n; + + if (list_empty(&g2d->runqueue)) + return; + + list_for_each_entry_safe(node, n, &g2d->runqueue, list) { + if (file && node->filp != file) + continue; + + list_del_init(&node->list); + g2d_free_runqueue_node(g2d, node); + } } static void g2d_runqueue_worker(struct work_struct *work) { struct g2d_data *g2d = container_of(work, struct g2d_data, runqueue_work); + struct g2d_runqueue_node *runqueue_node; + + /* + * The engine is busy and the completion of the current node is going + * to poke the runqueue worker, so nothing to do here. 
+ */ + if (test_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags)) + return; mutex_lock(&g2d->runqueue_mutex); - pm_runtime_put_sync(g2d->dev); - complete(&g2d->runqueue_node->complete); - if (g2d->runqueue_node->async) - g2d_free_runqueue_node(g2d, g2d->runqueue_node); + runqueue_node = g2d->runqueue_node; + g2d->runqueue_node = NULL; + + if (runqueue_node) { + pm_runtime_mark_last_busy(g2d->dev); + pm_runtime_put_autosuspend(g2d->dev); + + complete(&runqueue_node->complete); + if (runqueue_node->async) + g2d_free_runqueue_node(g2d, runqueue_node); + } + + if (!test_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags)) { + g2d->runqueue_node = g2d_get_runqueue_node(g2d); + + if (g2d->runqueue_node) { + pm_runtime_get_sync(g2d->dev); + g2d_dma_start(g2d, g2d->runqueue_node); + } + } - if (g2d->suspended) - g2d->runqueue_node = NULL; - else - g2d_exec_runqueue(g2d); mutex_unlock(&g2d->runqueue_mutex); } @@ -918,12 +965,72 @@ static irqreturn_t g2d_irq_handler(int irq, void *dev_id) } } - if (pending & G2D_INTP_ACMD_FIN) + if (pending & G2D_INTP_ACMD_FIN) { + clear_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags); queue_work(g2d->g2d_workq, &g2d->runqueue_work); + } return IRQ_HANDLED; } +/** + * g2d_wait_finish - wait for the G2D engine to finish the current runqueue node + * @g2d: G2D state object + * @file: if not zero, only wait if the current runqueue node belongs + * to the DRM file + * + * Should the engine not become idle after a 100ms timeout, a hardware + * reset is issued. + */ +static void g2d_wait_finish(struct g2d_data *g2d, struct drm_file *file) +{ + struct device *dev = g2d->dev; + + struct g2d_runqueue_node *runqueue_node = NULL; + unsigned int tries = 10; + + mutex_lock(&g2d->runqueue_mutex); + + /* If no node is currently processed, we have nothing to do. */ + if (!g2d->runqueue_node) + goto out; + + runqueue_node = g2d->runqueue_node; + + /* Check if the currently processed item belongs to us. */ + if (file && runqueue_node->filp != file) + goto out; + + mutex_unlock(&g2d->runqueue_mutex); + + /* Wait for the G2D engine to finish. */ + while (tries-- && (g2d->runqueue_node == runqueue_node)) + mdelay(10); + + mutex_lock(&g2d->runqueue_mutex); + + if (g2d->runqueue_node != runqueue_node) + goto out; + + dev_err(dev, "wait timed out, resetting engine...\n"); + g2d_hw_reset(g2d); + + /* + * After the hardware reset of the engine we are going to loose + * the IRQ which triggers the PM runtime put(). + * So do this manually here. + */ + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + + complete(&runqueue_node->complete); + if (runqueue_node->async) + g2d_free_runqueue_node(g2d, runqueue_node); + +out: + mutex_unlock(&g2d->runqueue_mutex); +} + static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist_node *node, int nr, bool for_addr) @@ -1259,10 +1366,11 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data, runqueue_node->pid = current->pid; runqueue_node->filp = file; list_add_tail(&runqueue_node->list, &g2d->runqueue); - if (!g2d->runqueue_node) - g2d_exec_runqueue(g2d); mutex_unlock(&g2d->runqueue_mutex); + /* Let the runqueue know that there is work to do. */ + queue_work(g2d->g2d_workq, &g2d->runqueue_work); + if (runqueue_node->async) goto out; @@ -1339,15 +1447,26 @@ static void g2d_close(struct drm_device *drm_dev, struct device *dev, if (!g2d) return; + /* Remove the runqueue nodes that belong to us. 
*/ + mutex_lock(&g2d->runqueue_mutex); + g2d_remove_runqueue_nodes(g2d, file); + mutex_unlock(&g2d->runqueue_mutex); + + /* + * Wait for the runqueue worker to finish its current node. + * After this the engine should no longer be accessing any + * memory belonging to us. + */ + g2d_wait_finish(g2d, file); + + /* + * Even after the engine is idle, there might still be stale cmdlists + * (i.e. cmdlisst which we submitted but never executed) around, with + * their corresponding GEM/userptr buffers. + * Properly unmap these buffers here. + */ mutex_lock(&g2d->cmdlist_mutex); list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list) { - /* - * unmap all gem objects not completed. - * - * P.S. if current process was terminated forcely then - * there may be some commands in inuse_cmdlist so unmap - * them. - */ g2d_unmap_cmdlist_gem(g2d, node, file); list_move_tail(&node->list, &g2d->free_cmdlist); } @@ -1399,7 +1518,11 @@ static int g2d_probe(struct platform_device *pdev) goto err_destroy_workqueue; } + pm_runtime_use_autosuspend(dev); + pm_runtime_set_autosuspend_delay(dev, 2000); pm_runtime_enable(dev); + clear_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags); + clear_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -1440,7 +1563,7 @@ static int g2d_probe(struct platform_device *pdev) goto err_put_clk; } - dev_info(dev, "The exynos g2d(ver %d.%d) successfully probed\n", + dev_info(dev, "The Exynos G2D (ver %d.%d) successfully probed.\n", G2D_HW_MAJOR_VER, G2D_HW_MINOR_VER); return 0; @@ -1458,14 +1581,17 @@ static int g2d_remove(struct platform_device *pdev) { struct g2d_data *g2d = platform_get_drvdata(pdev); + /* Suspend operation and wait for engine idle. */ + set_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags); + g2d_wait_finish(g2d, NULL); + cancel_work_sync(&g2d->runqueue_work); exynos_drm_subdrv_unregister(&g2d->subdrv); - while (g2d->runqueue_node) { - g2d_free_runqueue_node(g2d, g2d->runqueue_node); - g2d->runqueue_node = g2d_get_runqueue_node(g2d); - } + /* There should be no locking needed here. */ + g2d_remove_runqueue_nodes(g2d, NULL); + pm_runtime_dont_use_autosuspend(&pdev->dev); pm_runtime_disable(&pdev->dev); g2d_fini_cmdlist(g2d); @@ -1475,20 +1601,37 @@ static int g2d_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM -static int g2d_runtime_suspend(struct device *dev) +#ifdef CONFIG_PM_SLEEP +static int g2d_suspend(struct device *dev) { struct g2d_data *g2d = dev_get_drvdata(dev); - mutex_lock(&g2d->runqueue_mutex); - g2d->suspended = true; - mutex_unlock(&g2d->runqueue_mutex); + /* + * Suspend the runqueue worker operation and wait until the G2D + * engine is idle. + */ + set_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags); + g2d_wait_finish(g2d, NULL); + flush_work(&g2d->runqueue_work); - while (g2d->runqueue_node) - /* FIXME: good range? 
*/ - usleep_range(500, 1000); + return 0; +} - flush_work(&g2d->runqueue_work); +static int g2d_resume(struct device *dev) +{ + struct g2d_data *g2d = dev_get_drvdata(dev); + + clear_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags); + queue_work(g2d->g2d_workq, &g2d->runqueue_work); + + return 0; +} +#endif + +#ifdef CONFIG_PM +static int g2d_runtime_suspend(struct device *dev) +{ + struct g2d_data *g2d = dev_get_drvdata(dev); clk_disable_unprepare(g2d->gate_clk); @@ -1504,16 +1647,12 @@ static int g2d_runtime_resume(struct device *dev) if (ret < 0) dev_warn(dev, "failed to enable clock.\n"); - g2d->suspended = false; - g2d_exec_runqueue(g2d); - return ret; } #endif static const struct dev_pm_ops g2d_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, - pm_runtime_force_resume) + SET_SYSTEM_SLEEP_PM_OPS(g2d_suspend, g2d_resume) SET_RUNTIME_PM_OPS(g2d_runtime_suspend, g2d_runtime_resume, NULL) }; diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c index 7f32419b25ea..c2f17f30afab 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_plane.c +++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c @@ -238,7 +238,6 @@ static void exynos_plane_atomic_update(struct drm_plane *plane, return; plane->crtc = state->crtc; - exynos_plane->pending_fb = state->fb; if (exynos_crtc->ops->update_plane) exynos_crtc->ops->update_plane(exynos_crtc, exynos_plane); diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index e8f6c92b2a36..57fe514d5c5b 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -15,6 +15,7 @@ #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/component.h> +#include <linux/timer.h> #include <drm/exynos_drm.h> @@ -28,6 +29,9 @@ #include "exynos_drm_plane.h" #include "exynos_drm_vidi.h" +/* VIDI uses fixed refresh rate of 50Hz */ +#define VIDI_REFRESH_TIME (1000 / 50) + /* vidi has totally three virtual windows. 
*/ #define WINDOWS_NR 3 @@ -43,12 +47,9 @@ struct vidi_context { struct exynos_drm_plane planes[WINDOWS_NR]; struct edid *raw_edid; unsigned int clkdiv; - unsigned long irq_flags; unsigned int connected; - bool vblank_on; bool suspended; - bool direct_vblank; - struct work_struct work; + struct timer_list timer; struct mutex lock; int pipe; }; @@ -102,30 +103,14 @@ static int vidi_enable_vblank(struct exynos_drm_crtc *crtc) if (ctx->suspended) return -EPERM; - if (!test_and_set_bit(0, &ctx->irq_flags)) - ctx->vblank_on = true; - - ctx->direct_vblank = true; - - /* - * in case of page flip request, vidi_finish_pageflip function - * will not be called because direct_vblank is true and then - * that function will be called by crtc_ops->update_plane callback - */ - schedule_work(&ctx->work); + mod_timer(&ctx->timer, + jiffies + msecs_to_jiffies(VIDI_REFRESH_TIME) - 1); return 0; } static void vidi_disable_vblank(struct exynos_drm_crtc *crtc) { - struct vidi_context *ctx = crtc->ctx; - - if (ctx->suspended) - return; - - if (test_and_clear_bit(0, &ctx->irq_flags)) - ctx->vblank_on = false; } static void vidi_update_plane(struct exynos_drm_crtc *crtc, @@ -140,9 +125,6 @@ static void vidi_update_plane(struct exynos_drm_crtc *crtc, addr = exynos_drm_fb_dma_addr(state->fb, 0); DRM_DEBUG_KMS("dma_addr = %pad\n", &addr); - - if (ctx->vblank_on) - schedule_work(&ctx->work); } static void vidi_enable(struct exynos_drm_crtc *crtc) @@ -153,17 +135,17 @@ static void vidi_enable(struct exynos_drm_crtc *crtc) ctx->suspended = false; - /* if vblank was enabled status, enable it again. */ - if (test_and_clear_bit(0, &ctx->irq_flags)) - vidi_enable_vblank(ctx->crtc); - mutex_unlock(&ctx->lock); + + drm_crtc_vblank_on(&crtc->base); } static void vidi_disable(struct exynos_drm_crtc *crtc) { struct vidi_context *ctx = crtc->ctx; + drm_crtc_vblank_off(&crtc->base); + mutex_lock(&ctx->lock); ctx->suspended = true; @@ -190,37 +172,16 @@ static const struct exynos_drm_crtc_ops vidi_crtc_ops = { .update_plane = vidi_update_plane, }; -static void vidi_fake_vblank_handler(struct work_struct *work) +static void vidi_fake_vblank_timer(unsigned long arg) { - struct vidi_context *ctx = container_of(work, struct vidi_context, - work); - int win; + struct vidi_context *ctx = (void *)arg; if (ctx->pipe < 0) return; - /* refresh rate is about 50Hz. 
*/ - usleep_range(16000, 20000); - - mutex_lock(&ctx->lock); - - if (ctx->direct_vblank) { - drm_crtc_handle_vblank(&ctx->crtc->base); - ctx->direct_vblank = false; - mutex_unlock(&ctx->lock); - return; - } - - mutex_unlock(&ctx->lock); - - for (win = 0 ; win < WINDOWS_NR ; win++) { - struct exynos_drm_plane *plane = &ctx->planes[win]; - - if (!plane->pending_fb) - continue; - - exynos_drm_crtc_finish_update(ctx->crtc, plane); - } + if (drm_crtc_handle_vblank(&ctx->crtc->base)) + mod_timer(&ctx->timer, + jiffies + msecs_to_jiffies(VIDI_REFRESH_TIME) - 1); } static ssize_t vidi_show_connection(struct device *dev, @@ -489,6 +450,9 @@ static int vidi_bind(struct device *dev, struct device *master, void *data) static void vidi_unbind(struct device *dev, struct device *master, void *data) { + struct vidi_context *ctx = dev_get_drvdata(dev); + + del_timer_sync(&ctx->timer); } static const struct component_ops vidi_component_ops = { @@ -507,7 +471,7 @@ static int vidi_probe(struct platform_device *pdev) ctx->pdev = pdev; - INIT_WORK(&ctx->work, vidi_fake_vblank_handler); + setup_timer(&ctx->timer, vidi_fake_vblank_timer, (unsigned long)ctx); mutex_init(&ctx->lock); diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 2275efe41acd..e8fb6ef947ee 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -1669,10 +1669,9 @@ static int hdmi_resources_init(struct hdmi_context *hdata) if (ret) return ret; - for (i = 0; i < ARRAY_SIZE(supply); ++i) { + for (i = 0; i < ARRAY_SIZE(supply); ++i) hdata->regul_bulk[i].supply = supply[i]; - hdata->regul_bulk[i].consumer = NULL; - } + ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supply), hdata->regul_bulk); if (ret) { if (ret != -EPROBE_DEFER) @@ -1760,28 +1759,74 @@ static const struct component_ops hdmi_component_ops = { .unbind = hdmi_unbind, }; -static struct device_node *hdmi_legacy_ddc_dt_binding(struct device *dev) +static int hdmi_get_ddc_adapter(struct hdmi_context *hdata) { const char *compatible_str = "samsung,exynos4210-hdmiddc"; struct device_node *np; + struct i2c_adapter *adpt; np = of_find_compatible_node(NULL, NULL, compatible_str); if (np) - return of_get_next_parent(np); + np = of_get_next_parent(np); + else + np = of_parse_phandle(hdata->dev->of_node, "ddc", 0); + + if (!np) { + DRM_ERROR("Failed to find ddc node in device tree\n"); + return -ENODEV; + } - return NULL; + adpt = of_find_i2c_adapter_by_node(np); + of_node_put(np); + + if (!adpt) { + DRM_INFO("Failed to get ddc i2c adapter by node\n"); + return -EPROBE_DEFER; + } + + hdata->ddc_adpt = adpt; + + return 0; } -static struct device_node *hdmi_legacy_phy_dt_binding(struct device *dev) +static int hdmi_get_phy_io(struct hdmi_context *hdata) { const char *compatible_str = "samsung,exynos4212-hdmiphy"; + struct device_node *np; + int ret = 0; + + np = of_find_compatible_node(NULL, NULL, compatible_str); + if (!np) { + np = of_parse_phandle(hdata->dev->of_node, "phy", 0); + if (!np) { + DRM_ERROR("Failed to find hdmiphy node in device tree\n"); + return -ENODEV; + } + } + + if (hdata->drv_data->is_apb_phy) { + hdata->regs_hdmiphy = of_iomap(np, 0); + if (!hdata->regs_hdmiphy) { + DRM_ERROR("failed to ioremap hdmi phy\n"); + ret = -ENOMEM; + goto out; + } + } else { + hdata->hdmiphy_port = of_find_i2c_device_by_node(np); + if (!hdata->hdmiphy_port) { + DRM_INFO("Failed to get hdmi phy i2c client\n"); + ret = -EPROBE_DEFER; + goto out; + } + } - return of_find_compatible_node(NULL, NULL, compatible_str); 
+out: + of_node_put(np); + return ret; } static int hdmi_probe(struct platform_device *pdev) { - struct device_node *ddc_node, *phy_node; struct device *dev = &pdev->dev; struct hdmi_context *hdata; struct resource *res; @@ -1811,52 +1856,13 @@ static int hdmi_probe(struct platform_device *pdev) return ret; } - ddc_node = hdmi_legacy_ddc_dt_binding(dev); - if (ddc_node) - goto out_get_ddc_adpt; - - ddc_node = of_parse_phandle(dev->of_node, "ddc", 0); - if (!ddc_node) { - DRM_ERROR("Failed to find ddc node in device tree\n"); - return -ENODEV; - } - of_node_put(dev->of_node); - -out_get_ddc_adpt: - hdata->ddc_adpt = of_find_i2c_adapter_by_node(ddc_node); - if (!hdata->ddc_adpt) { - DRM_ERROR("Failed to get ddc i2c adapter by node\n"); - return -EPROBE_DEFER; - } - - phy_node = hdmi_legacy_phy_dt_binding(dev); - if (phy_node) - goto out_get_phy_port; + ret = hdmi_get_ddc_adapter(hdata); + if (ret) + return ret; - phy_node = of_parse_phandle(dev->of_node, "phy", 0); - if (!phy_node) { - DRM_ERROR("Failed to find hdmiphy node in device tree\n"); - ret = -ENODEV; + ret = hdmi_get_phy_io(hdata); + if (ret) goto err_ddc; - } - of_node_put(dev->of_node); - -out_get_phy_port: - if (hdata->drv_data->is_apb_phy) { - hdata->regs_hdmiphy = of_iomap(phy_node, 0); - if (!hdata->regs_hdmiphy) { - DRM_ERROR("failed to ioremap hdmi phy\n"); - ret = -ENOMEM; - goto err_ddc; - } - } else { - hdata->hdmiphy_port = of_find_i2c_device_by_node(phy_node); - if (!hdata->hdmiphy_port) { - DRM_ERROR("Failed to get hdmi phy i2c client\n"); - ret = -EPROBE_DEFER; - goto err_ddc; - } - } INIT_DELAYED_WORK(&hdata->hotplug_work, hdmi_hotplug_work_func); diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index e1d47f9435fc..edb20a34c66c 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -73,6 +73,9 @@ enum mixer_version_id { enum mixer_flag_bits { MXR_BIT_POWERED, MXR_BIT_VSYNC, + MXR_BIT_INTERLACE, + MXR_BIT_VP_ENABLED, + MXR_BIT_HAS_SCLK, }; static const uint32_t mixer_formats[] = { @@ -98,9 +101,6 @@ struct mixer_context { struct exynos_drm_plane planes[MIXER_WIN_NR]; int pipe; unsigned long flags; - bool interlace; - bool vp_enabled; - bool has_sclk; struct mixer_resources mixer_res; enum mixer_version_id mxr_ver; @@ -346,7 +346,7 @@ static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable) mixer_reg_writemask(res, MXR_STATUS, enable ? MXR_STATUS_SYNC_ENABLE : 0, MXR_STATUS_SYNC_ENABLE); - if (ctx->vp_enabled) + if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) vp_reg_write(res, VP_SHADOW_UPDATE, enable ? VP_SHADOW_UPDATE_ENABLE : 0); } @@ -357,8 +357,8 @@ static void mixer_cfg_scan(struct mixer_context *ctx, unsigned int height) u32 val; /* choosing between interlace and progressive mode */ - val = (ctx->interlace ? MXR_CFG_SCAN_INTERLACE : - MXR_CFG_SCAN_PROGRESSIVE); + val = test_bit(MXR_BIT_INTERLACE, &ctx->flags) ? 
+ MXR_CFG_SCAN_INTERLACE : MXR_CFG_SCAN_PROGRESSIVE; if (ctx->mxr_ver != MXR_VER_128_0_0_184) { /* choosing between proper HD and SD mode */ @@ -436,9 +436,10 @@ static void mixer_cfg_layer(struct mixer_context *ctx, unsigned int win, mixer_reg_writemask(res, MXR_LAYER_CFG, MXR_LAYER_CFG_GRP1_VAL(priority), MXR_LAYER_CFG_GRP1_MASK); + break; case VP_DEFAULT_WIN: - if (ctx->vp_enabled) { + if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) { vp_reg_writemask(res, VP_ENABLE, val, VP_ENABLE_ON); mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_VP_ENABLE); @@ -501,7 +502,7 @@ static void vp_video_buffer(struct mixer_context *ctx, chroma_addr[0] = exynos_drm_fb_dma_addr(fb, 1); if (mode->flags & DRM_MODE_FLAG_INTERLACE) { - ctx->interlace = true; + __set_bit(MXR_BIT_INTERLACE, &ctx->flags); if (tiled_mode) { luma_addr[1] = luma_addr[0] + 0x40; chroma_addr[1] = chroma_addr[0] + 0x40; @@ -510,7 +511,7 @@ static void vp_video_buffer(struct mixer_context *ctx, chroma_addr[1] = chroma_addr[0] + fb->pitches[0]; } } else { - ctx->interlace = false; + __clear_bit(MXR_BIT_INTERLACE, &ctx->flags); luma_addr[1] = 0; chroma_addr[1] = 0; } @@ -518,7 +519,7 @@ static void vp_video_buffer(struct mixer_context *ctx, spin_lock_irqsave(&res->reg_slock, flags); /* interlace or progressive scan mode */ - val = (ctx->interlace ? ~0 : 0); + val = (test_bit(MXR_BIT_INTERLACE, &ctx->flags) ? ~0 : 0); vp_reg_writemask(res, VP_MODE, val, VP_MODE_LINE_SKIP); /* setup format */ @@ -541,7 +542,7 @@ static void vp_video_buffer(struct mixer_context *ctx, vp_reg_write(res, VP_DST_WIDTH, state->crtc.w); vp_reg_write(res, VP_DST_H_POSITION, state->crtc.x); - if (ctx->interlace) { + if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) { vp_reg_write(res, VP_DST_HEIGHT, state->crtc.h / 2); vp_reg_write(res, VP_DST_V_POSITION, state->crtc.y / 2); } else { @@ -636,9 +637,9 @@ static void mixer_graph_buffer(struct mixer_context *ctx, src_y_offset = 0; if (mode->flags & DRM_MODE_FLAG_INTERLACE) - ctx->interlace = true; + __set_bit(MXR_BIT_INTERLACE, &ctx->flags); else - ctx->interlace = false; + __clear_bit(MXR_BIT_INTERLACE, &ctx->flags); spin_lock_irqsave(&res->reg_slock, flags); @@ -697,10 +698,10 @@ static void mixer_graph_buffer(struct mixer_context *ctx, static void vp_win_reset(struct mixer_context *ctx) { struct mixer_resources *res = &ctx->mixer_res; - int tries = 100; + unsigned int tries = 100; vp_reg_write(res, VP_SRESET, VP_SRESET_PROCESSING); - for (tries = 100; tries; --tries) { + while (tries--) { /* waiting until VP_SRESET_PROCESSING is 0 */ if (~vp_reg_read(res, VP_SRESET) & VP_SRESET_PROCESSING) break; @@ -733,7 +734,7 @@ static void mixer_win_reset(struct mixer_context *ctx) mixer_reg_write(res, MXR_BG_COLOR1, 0x008080); mixer_reg_write(res, MXR_BG_COLOR2, 0x008080); - if (ctx->vp_enabled) { + if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) { /* configuration of Video Processor Registers */ vp_win_reset(ctx); vp_default_filter(res); @@ -742,7 +743,7 @@ static void mixer_win_reset(struct mixer_context *ctx) /* disable all layers */ mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP0_ENABLE); mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP1_ENABLE); - if (ctx->vp_enabled) + if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_VP_ENABLE); spin_unlock_irqrestore(&res->reg_slock, flags); @@ -753,7 +754,6 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg) struct mixer_context *ctx = arg; struct mixer_resources *res = &ctx->mixer_res; u32 val, base, shadow; - int win; 
spin_lock(&res->reg_slock); @@ -767,7 +767,7 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg) val &= ~MXR_INT_STATUS_VSYNC; /* interlace scan need to check shadow register */ - if (ctx->interlace) { + if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) { base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0)); shadow = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(0)); if (base != shadow) @@ -780,14 +780,6 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg) } drm_crtc_handle_vblank(&ctx->crtc->base); - for (win = 0 ; win < MIXER_WIN_NR ; win++) { - struct exynos_drm_plane *plane = &ctx->planes[win]; - - if (!plane->pending_fb) - continue; - - exynos_drm_crtc_finish_update(ctx->crtc, plane); - } } out: @@ -867,7 +859,7 @@ static int vp_resources_init(struct mixer_context *mixer_ctx) return -ENODEV; } - if (mixer_ctx->has_sclk) { + if (test_bit(MXR_BIT_HAS_SCLK, &mixer_ctx->flags)) { mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer"); if (IS_ERR(mixer_res->sclk_mixer)) { dev_err(dev, "failed to get clock 'sclk_mixer'\n"); @@ -917,7 +909,7 @@ static int mixer_initialize(struct mixer_context *mixer_ctx, return ret; } - if (mixer_ctx->vp_enabled) { + if (test_bit(MXR_BIT_VP_ENABLED, &mixer_ctx->flags)) { /* acquire vp resources: regs, irqs, clocks */ ret = vp_resources_init(mixer_ctx); if (ret) { @@ -1160,7 +1152,8 @@ static int mixer_bind(struct device *dev, struct device *manager, void *data) return ret; for (i = 0; i < MIXER_WIN_NR; i++) { - if (i == VP_DEFAULT_WIN && !ctx->vp_enabled) + if (i == VP_DEFAULT_WIN && !test_bit(MXR_BIT_VP_ENABLED, + &ctx->flags)) continue; ret = exynos_plane_init(drm_dev, &ctx->planes[i], i, @@ -1215,10 +1208,13 @@ static int mixer_probe(struct platform_device *pdev) ctx->pdev = pdev; ctx->dev = dev; - ctx->vp_enabled = drv->is_vp_enabled; - ctx->has_sclk = drv->has_sclk; ctx->mxr_ver = drv->version; + if (drv->is_vp_enabled) + __set_bit(MXR_BIT_VP_ENABLED, &ctx->flags); + if (drv->has_sclk) + __set_bit(MXR_BIT_HAS_SCLK, &ctx->flags); + platform_set_drvdata(pdev, ctx); ret = component_add(&pdev->dev, &mixer_component_ops); @@ -1244,9 +1240,9 @@ static int __maybe_unused exynos_mixer_suspend(struct device *dev) clk_disable_unprepare(res->hdmi); clk_disable_unprepare(res->mixer); - if (ctx->vp_enabled) { + if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) { clk_disable_unprepare(res->vp); - if (ctx->has_sclk) + if (test_bit(MXR_BIT_HAS_SCLK, &ctx->flags)) clk_disable_unprepare(res->sclk_mixer); } @@ -1269,14 +1265,14 @@ static int __maybe_unused exynos_mixer_resume(struct device *dev) DRM_ERROR("Failed to prepare_enable the hdmi clk [%d]\n", ret); return ret; } - if (ctx->vp_enabled) { + if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) { ret = clk_prepare_enable(res->vp); if (ret < 0) { DRM_ERROR("Failed to prepare_enable the vp clk [%d]\n", ret); return ret; } - if (ctx->has_sclk) { + if (test_bit(MXR_BIT_HAS_SCLK, &ctx->flags)) { ret = clk_prepare_enable(res->sclk_mixer); if (ret < 0) { DRM_ERROR("Failed to prepare_enable the " \ |