Diffstat (limited to 'drivers/media/platform')
-rw-r--r--  drivers/media/platform/Kconfig  |   11
-rw-r--r--  drivers/media/platform/Makefile  |    2
-rw-r--r--  drivers/media/platform/am437x/Kconfig  |   11
-rw-r--r--  drivers/media/platform/am437x/Makefile  |    3
-rw-r--r--  drivers/media/platform/am437x/am437x-vpfe.c  | 2776
-rw-r--r--  drivers/media/platform/am437x/am437x-vpfe.h  |  283
-rw-r--r--  drivers/media/platform/am437x/am437x-vpfe_regs.h  |  140
-rw-r--r--  drivers/media/platform/coda/coda-bit.c  |   25
-rw-r--r--  drivers/media/platform/coda/coda-common.c  |  165
-rw-r--r--  drivers/media/platform/coda/coda.h  |    2
-rw-r--r--  drivers/media/platform/coda/coda_regs.h  |    4
-rw-r--r--  drivers/media/platform/davinci/Kconfig  |    6
-rw-r--r--  drivers/media/platform/exynos-gsc/gsc-core.h  |   12
-rw-r--r--  drivers/media/platform/exynos-gsc/gsc-m2m.c  |    6
-rw-r--r--  drivers/media/platform/marvell-ccic/Kconfig  |    3
-rw-r--r--  drivers/media/platform/marvell-ccic/mcam-core.c  |    1
-rw-r--r--  drivers/media/platform/omap3isp/isp.c  |    3
-rw-r--r--  drivers/media/platform/s3c-camif/camif-capture.c  |   17
-rw-r--r--  drivers/media/platform/s5p-g2d/g2d.c  |    1
-rw-r--r--  drivers/media/platform/s5p-mfc/s5p_mfc.c  |    1
-rw-r--r--  drivers/media/platform/s5p-mfc/s5p_mfc_dec.c  |   23
-rw-r--r--  drivers/media/platform/s5p-mfc/s5p_mfc_enc.c  |   21
-rw-r--r--  drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c  |    6
-rw-r--r--  drivers/media/platform/s5p-tv/mixer_video.c  |   21
-rw-r--r--  drivers/media/platform/sh_veu.c  |   35
-rw-r--r--  drivers/media/platform/soc_camera/atmel-isi.c  |    7
-rw-r--r--  drivers/media/platform/soc_camera/mx3_camera.c  |    7
-rw-r--r--  drivers/media/platform/soc_camera/rcar_vin.c  |   94
-rw-r--r--  drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c  |    7
-rw-r--r--  drivers/media/platform/soc_camera/soc_camera.c  |   18
-rw-r--r--  drivers/media/platform/ti-vpe/vpe.c  |  162
-rw-r--r--  drivers/media/platform/vivid/vivid-ctrls.c  |    4
-rw-r--r--  drivers/media/platform/vivid/vivid-tpg.c  |   10
-rw-r--r--  drivers/media/platform/vivid/vivid-tpg.h  |    1
-rw-r--r--  drivers/media/platform/vsp1/vsp1.h  |   14
-rw-r--r--  drivers/media/platform/vsp1/vsp1_bru.c  |    2
-rw-r--r--  drivers/media/platform/vsp1/vsp1_drv.c  |   81
-rw-r--r--  drivers/media/platform/vsp1/vsp1_hsit.c  |    5
-rw-r--r--  drivers/media/platform/vsp1/vsp1_regs.h  |    4
-rw-r--r--  drivers/media/platform/vsp1/vsp1_rpf.c  |   18
-rw-r--r--  drivers/media/platform/vsp1/vsp1_rwpf.h  |    1
-rw-r--r--  drivers/media/platform/vsp1/vsp1_wpf.c  |   13
42 files changed, 3561 insertions(+), 465 deletions(-)
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 765bffb49a72..d9b872b9285a 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -56,10 +56,8 @@ config VIDEO_VIU
config VIDEO_TIMBERDALE
tristate "Support for timberdale Video In/LogiWIN"
- depends on VIDEO_V4L2 && I2C && DMADEVICES
- depends on MFD_TIMBERDALE || COMPILE_TEST
- select DMA_ENGINE
- select TIMB_DMA
+ depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
+ depends on (MFD_TIMBERDALE && TIMB_DMA) || COMPILE_TEST
select VIDEO_ADV7180
select VIDEOBUF_DMA_CONTIG
---help---
@@ -118,6 +116,7 @@ config VIDEO_S3C_CAMIF
source "drivers/media/platform/soc_camera/Kconfig"
source "drivers/media/platform/exynos4-is/Kconfig"
source "drivers/media/platform/s5p-tv/Kconfig"
+source "drivers/media/platform/am437x/Kconfig"
endif # V4L_PLATFORM_DRIVERS
@@ -140,6 +139,7 @@ config VIDEO_CODA
depends on HAS_DMA
select SRAM
select VIDEOBUF2_DMA_CONTIG
+ select VIDEOBUF2_VMALLOC
select V4L2_MEM2MEM_DEV
select GENERIC_ALLOCATOR
---help---
@@ -213,7 +213,6 @@ config VIDEO_SAMSUNG_EXYNOS_GSC
config VIDEO_SH_VEU
tristate "SuperH VEU mem2mem video processing driver"
depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
- depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
help
@@ -223,7 +222,7 @@ config VIDEO_SH_VEU
config VIDEO_RENESAS_VSP1
tristate "Renesas VSP1 Video Processing Engine"
depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && HAS_DMA
- depends on ARCH_SHMOBILE || COMPILE_TEST
+ depends on (ARCH_SHMOBILE && OF) || COMPILE_TEST
select VIDEOBUF2_DMA_CONTIG
---help---
This is a V4L2 driver for the Renesas VSP1 video processing engine.
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index a49936b8ce8a..3ec154742083 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -46,4 +46,6 @@ obj-$(CONFIG_VIDEO_RENESAS_VSP1) += vsp1/
obj-y += omap/
+obj-$(CONFIG_VIDEO_AM437X_VPFE) += am437x/
+
ccflags-y += -I$(srctree)/drivers/media/i2c
diff --git a/drivers/media/platform/am437x/Kconfig b/drivers/media/platform/am437x/Kconfig
new file mode 100644
index 000000000000..7b023a76e32e
--- /dev/null
+++ b/drivers/media/platform/am437x/Kconfig
@@ -0,0 +1,11 @@
+config VIDEO_AM437X_VPFE
+ tristate "TI AM437x VPFE video capture driver"
+ depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on SOC_AM43XX || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ help
+ Support for AM437x Video Processing Front End based Video
+ Capture Driver.
+
+ To compile this driver as a module, choose M here. The module
+ will be called am437x-vpfe.
diff --git a/drivers/media/platform/am437x/Makefile b/drivers/media/platform/am437x/Makefile
new file mode 100644
index 000000000000..d11fff16f260
--- /dev/null
+++ b/drivers/media/platform/am437x/Makefile
@@ -0,0 +1,3 @@
+# Makefile for AM437x VPFE driver
+
+obj-$(CONFIG_VIDEO_AM437X_VPFE) += am437x-vpfe.o
diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
new file mode 100644
index 000000000000..56a5cb0d2152
--- /dev/null
+++ b/drivers/media/platform/am437x/am437x-vpfe.c
@@ -0,0 +1,2776 @@
+/*
+ * TI VPFE capture Driver
+ *
+ * Copyright (C) 2013 - 2014 Texas Instruments, Inc.
+ *
+ * Benoit Parrot <bparrot@ti.com>
+ * Lad, Prabhakar <prabhakar.csengg@gmail.com>
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-of.h>
+
+#include "am437x-vpfe.h"
+
+#define VPFE_MODULE_NAME "vpfe"
+#define VPFE_VERSION "0.1.0"
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Debug level 0-8");
+
+#define vpfe_dbg(level, dev, fmt, arg...) \
+ v4l2_dbg(level, debug, &dev->v4l2_dev, fmt, ##arg)
+#define vpfe_info(dev, fmt, arg...) \
+ v4l2_info(&dev->v4l2_dev, fmt, ##arg)
+#define vpfe_err(dev, fmt, arg...) \
+ v4l2_err(&dev->v4l2_dev, fmt, ##arg)
+
+/* standard information */
+struct vpfe_standard {
+ v4l2_std_id std_id;
+ unsigned int width;
+ unsigned int height;
+ struct v4l2_fract pixelaspect;
+ int frame_format;
+};
+
+static const struct vpfe_standard vpfe_standards[] = {
+ {V4L2_STD_525_60, 720, 480, {11, 10}, 1},
+ {V4L2_STD_625_50, 720, 576, {54, 59}, 1},
+};
+
+struct bus_format {
+ unsigned int width;
+ unsigned int bpp;
+};
+
+/*
+ * struct vpfe_fmt - VPFE media bus format information
+ * @name: V4L2 format description
+ * @fourcc: V4L2 pixel format FCC identifier
+ * @code: V4L2 media bus format code
+ * @l: bus width (bits) and bytes per pixel when transferred over a 10-bit bus
+ * @s: bus width (bits) and bytes per pixel when transferred over an 8-bit bus
+ * @supported: indicates whether the format is supported by the subdev
+ * @index: format index used when enumerating formats
+ */
+struct vpfe_fmt {
+ const char *name;
+ u32 fourcc;
+ u32 code;
+ struct bus_format l;
+ struct bus_format s;
+ bool supported;
+ u32 index;
+};
+
+static struct vpfe_fmt formats[] = {
+ {
+ .name = "YUV 4:2:2 packed, YCbYCr",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .l.width = 10,
+ .l.bpp = 4,
+ .s.width = 8,
+ .s.bpp = 2,
+ .supported = false,
+ }, {
+ .name = "YUV 4:2:2 packed, CbYCrY",
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .l.width = 10,
+ .l.bpp = 4,
+ .s.width = 8,
+ .s.bpp = 2,
+ .supported = false,
+ }, {
+ .name = "YUV 4:2:2 packed, YCrYCb",
+ .fourcc = V4L2_PIX_FMT_YVYU,
+ .code = MEDIA_BUS_FMT_YVYU8_2X8,
+ .l.width = 10,
+ .l.bpp = 4,
+ .s.width = 8,
+ .s.bpp = 2,
+ .supported = false,
+ }, {
+ .name = "YUV 4:2:2 packed, CrYCbY",
+ .fourcc = V4L2_PIX_FMT_VYUY,
+ .code = MEDIA_BUS_FMT_VYUY8_2X8,
+ .l.width = 10,
+ .l.bpp = 4,
+ .s.width = 8,
+ .s.bpp = 2,
+ .supported = false,
+ }, {
+ .name = "RAW8 BGGR",
+ .fourcc = V4L2_PIX_FMT_SBGGR8,
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .l.width = 10,
+ .l.bpp = 2,
+ .s.width = 8,
+ .s.bpp = 1,
+ .supported = false,
+ }, {
+ .name = "RAW8 GBRG",
+ .fourcc = V4L2_PIX_FMT_SGBRG8,
+ .code = MEDIA_BUS_FMT_SGBRG8_1X8,
+ .l.width = 10,
+ .l.bpp = 2,
+ .s.width = 8,
+ .s.bpp = 1,
+ .supported = false,
+ }, {
+ .name = "RAW8 GRBG",
+ .fourcc = V4L2_PIX_FMT_SGRBG8,
+ .code = MEDIA_BUS_FMT_SGRBG8_1X8,
+ .l.width = 10,
+ .l.bpp = 2,
+ .s.width = 8,
+ .s.bpp = 1,
+ .supported = false,
+ }, {
+ .name = "RAW8 RGGB",
+ .fourcc = V4L2_PIX_FMT_SRGGB8,
+ .code = MEDIA_BUS_FMT_SRGGB8_1X8,
+ .l.width = 10,
+ .l.bpp = 2,
+ .s.width = 8,
+ .s.bpp = 1,
+ .supported = false,
+ }, {
+ .name = "RGB565 (LE)",
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
+ .l.width = 10,
+ .l.bpp = 4,
+ .s.width = 8,
+ .s.bpp = 2,
+ .supported = false,
+ }, {
+ .name = "RGB565 (BE)",
+ .fourcc = V4L2_PIX_FMT_RGB565X,
+ .code = MEDIA_BUS_FMT_RGB565_2X8_BE,
+ .l.width = 10,
+ .l.bpp = 4,
+ .s.width = 8,
+ .s.bpp = 2,
+ .supported = false,
+ },
+};
+
+static int
+__vpfe_get_format(struct vpfe_device *vpfe,
+ struct v4l2_format *format, unsigned int *bpp);
+
+static struct vpfe_fmt *find_format_by_code(unsigned int code)
+{
+ struct vpfe_fmt *fmt;
+ unsigned int k;
+
+ for (k = 0; k < ARRAY_SIZE(formats); k++) {
+ fmt = &formats[k];
+ if (fmt->code == code)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+static struct vpfe_fmt *find_format_by_pix(unsigned int pixelformat)
+{
+ struct vpfe_fmt *fmt;
+ unsigned int k;
+
+ for (k = 0; k < ARRAY_SIZE(formats); k++) {
+ fmt = &formats[k];
+ if (fmt->fourcc == pixelformat)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+static void
+mbus_to_pix(struct vpfe_device *vpfe,
+ const struct v4l2_mbus_framefmt *mbus,
+ struct v4l2_pix_format *pix, unsigned int *bpp)
+{
+ struct vpfe_subdev_info *sdinfo = vpfe->current_subdev;
+ unsigned int bus_width = sdinfo->vpfe_param.bus_width;
+ struct vpfe_fmt *fmt;
+
+ fmt = find_format_by_code(mbus->code);
+ if (WARN_ON(fmt == NULL)) {
+ pr_err("Invalid mbus code set\n");
+ *bpp = 1;
+ return;
+ }
+
+ memset(pix, 0, sizeof(*pix));
+ v4l2_fill_pix_format(pix, mbus);
+ pix->pixelformat = fmt->fourcc;
+ *bpp = (bus_width == 10) ? fmt->l.bpp : fmt->s.bpp;
+
+ /* pitch should be 32 bytes aligned */
+ pix->bytesperline = ALIGN(pix->width * *bpp, 32);
+ pix->sizeimage = pix->bytesperline * pix->height;
+}
+
+static void pix_to_mbus(struct vpfe_device *vpfe,
+ struct v4l2_pix_format *pix_fmt,
+ struct v4l2_mbus_framefmt *mbus_fmt)
+{
+ struct vpfe_fmt *fmt;
+
+ fmt = find_format_by_pix(pix_fmt->pixelformat);
+ if (!fmt) {
+ /* default to first entry */
+ vpfe_dbg(3, vpfe, "Invalid pixel code: %x, default used instead\n",
+ pix_fmt->pixelformat);
+ fmt = &formats[0];
+ }
+
+ memset(mbus_fmt, 0, sizeof(*mbus_fmt));
+ v4l2_fill_mbus_format(mbus_fmt, pix_fmt, fmt->code);
+}
+
+/* Print Four-character-code (FOURCC) */
+static char *print_fourcc(u32 fmt)
+{
+ static char code[5];
+
+ code[0] = (unsigned char)(fmt & 0xff);
+ code[1] = (unsigned char)((fmt >> 8) & 0xff);
+ code[2] = (unsigned char)((fmt >> 16) & 0xff);
+ code[3] = (unsigned char)((fmt >> 24) & 0xff);
+ code[4] = '\0';
+
+ return code;
+}
+
+static int
+cmp_v4l2_format(const struct v4l2_format *lhs, const struct v4l2_format *rhs)
+{
+ return lhs->type == rhs->type &&
+ lhs->fmt.pix.width == rhs->fmt.pix.width &&
+ lhs->fmt.pix.height == rhs->fmt.pix.height &&
+ lhs->fmt.pix.pixelformat == rhs->fmt.pix.pixelformat &&
+ lhs->fmt.pix.field == rhs->fmt.pix.field &&
+ lhs->fmt.pix.colorspace == rhs->fmt.pix.colorspace &&
+ lhs->fmt.pix.ycbcr_enc == rhs->fmt.pix.ycbcr_enc &&
+ lhs->fmt.pix.quantization == rhs->fmt.pix.quantization;
+}
+
+static inline u32 vpfe_reg_read(struct vpfe_ccdc *ccdc, u32 offset)
+{
+ return ioread32(ccdc->ccdc_cfg.base_addr + offset);
+}
+
+static inline void vpfe_reg_write(struct vpfe_ccdc *ccdc, u32 val, u32 offset)
+{
+ iowrite32(val, ccdc->ccdc_cfg.base_addr + offset);
+}
+
+static inline struct vpfe_device *to_vpfe(struct vpfe_ccdc *ccdc)
+{
+ return container_of(ccdc, struct vpfe_device, ccdc);
+}
+
+static inline struct vpfe_cap_buffer *to_vpfe_buffer(struct vb2_buffer *vb)
+{
+ return container_of(vb, struct vpfe_cap_buffer, vb);
+}
+
+static inline void vpfe_pcr_enable(struct vpfe_ccdc *ccdc, int flag)
+{
+ vpfe_reg_write(ccdc, !!flag, VPFE_PCR);
+}
+
+static void vpfe_config_enable(struct vpfe_ccdc *ccdc, int flag)
+{
+ unsigned int cfg;
+
+ if (!flag) {
+ cfg = vpfe_reg_read(ccdc, VPFE_CONFIG);
+ cfg &= ~(VPFE_CONFIG_EN_ENABLE << VPFE_CONFIG_EN_SHIFT);
+ } else {
+ cfg = VPFE_CONFIG_EN_ENABLE << VPFE_CONFIG_EN_SHIFT;
+ }
+
+ vpfe_reg_write(ccdc, cfg, VPFE_CONFIG);
+}
+
+static void vpfe_ccdc_setwin(struct vpfe_ccdc *ccdc,
+ struct v4l2_rect *image_win,
+ enum ccdc_frmfmt frm_fmt,
+ int bpp)
+{
+ int horz_start, horz_nr_pixels;
+ int vert_start, vert_nr_lines;
+ int val, mid_img;
+
+ /*
+ * ppc - per pixel count. Indicates how many pixels per cell are
+ * output to SDRAM. For example, for YCbCr it is one Y and one C, so 2;
+ * for raw capture this is 1.
+ */
+ horz_start = image_win->left * bpp;
+ horz_nr_pixels = (image_win->width * bpp) - 1;
+ vpfe_reg_write(ccdc, (horz_start << VPFE_HORZ_INFO_SPH_SHIFT) |
+ horz_nr_pixels, VPFE_HORZ_INFO);
+
+ vert_start = image_win->top;
+
+ if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
+ vert_nr_lines = (image_win->height >> 1) - 1;
+ vert_start >>= 1;
+ /* Since first line doesn't have any data */
+ vert_start += 1;
+ /* configure VDINT0 */
+ val = (vert_start << VPFE_VDINT_VDINT0_SHIFT);
+ } else {
+ /* Since first line doesn't have any data */
+ vert_start += 1;
+ vert_nr_lines = image_win->height - 1;
+ /*
+ * configure VDINT0 and VDINT1. VDINT1 will be at half
+ * of image height
+ */
+ mid_img = vert_start + (image_win->height / 2);
+ val = (vert_start << VPFE_VDINT_VDINT0_SHIFT) |
+ (mid_img & VPFE_VDINT_VDINT1_MASK);
+ }
+
+ vpfe_reg_write(ccdc, val, VPFE_VDINT);
+
+ vpfe_reg_write(ccdc, (vert_start << VPFE_VERT_START_SLV0_SHIFT) |
+ vert_start, VPFE_VERT_START);
+ vpfe_reg_write(ccdc, vert_nr_lines, VPFE_VERT_LINES);
+}
+
+static void vpfe_reg_dump(struct vpfe_ccdc *ccdc)
+{
+ struct vpfe_device *vpfe = to_vpfe(ccdc);
+
+ vpfe_dbg(3, vpfe, "ALAW: 0x%x\n", vpfe_reg_read(ccdc, VPFE_ALAW));
+ vpfe_dbg(3, vpfe, "CLAMP: 0x%x\n", vpfe_reg_read(ccdc, VPFE_CLAMP));
+ vpfe_dbg(3, vpfe, "DCSUB: 0x%x\n", vpfe_reg_read(ccdc, VPFE_DCSUB));
+ vpfe_dbg(3, vpfe, "BLKCMP: 0x%x\n", vpfe_reg_read(ccdc, VPFE_BLKCMP));
+ vpfe_dbg(3, vpfe, "COLPTN: 0x%x\n", vpfe_reg_read(ccdc, VPFE_COLPTN));
+ vpfe_dbg(3, vpfe, "SDOFST: 0x%x\n", vpfe_reg_read(ccdc, VPFE_SDOFST));
+ vpfe_dbg(3, vpfe, "SYN_MODE: 0x%x\n",
+ vpfe_reg_read(ccdc, VPFE_SYNMODE));
+ vpfe_dbg(3, vpfe, "HSIZE_OFF: 0x%x\n",
+ vpfe_reg_read(ccdc, VPFE_HSIZE_OFF));
+ vpfe_dbg(3, vpfe, "HORZ_INFO: 0x%x\n",
+ vpfe_reg_read(ccdc, VPFE_HORZ_INFO));
+ vpfe_dbg(3, vpfe, "VERT_START: 0x%x\n",
+ vpfe_reg_read(ccdc, VPFE_VERT_START));
+ vpfe_dbg(3, vpfe, "VERT_LINES: 0x%x\n",
+ vpfe_reg_read(ccdc, VPFE_VERT_LINES));
+}
+
+static int
+vpfe_ccdc_validate_param(struct vpfe_ccdc *ccdc,
+ struct vpfe_ccdc_config_params_raw *ccdcparam)
+{
+ struct vpfe_device *vpfe = to_vpfe(ccdc);
+ u8 max_gamma, max_data;
+
+ if (!ccdcparam->alaw.enable)
+ return 0;
+
+ max_gamma = ccdc_gamma_width_max_bit(ccdcparam->alaw.gamma_wd);
+ max_data = ccdc_data_size_max_bit(ccdcparam->data_sz);
+
+ if (ccdcparam->alaw.gamma_wd > VPFE_CCDC_GAMMA_BITS_09_0 ||
+ ccdcparam->alaw.gamma_wd < VPFE_CCDC_GAMMA_BITS_15_6 ||
+ max_gamma > max_data) {
+ vpfe_dbg(1, vpfe, "Invalid data line select\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+vpfe_ccdc_update_raw_params(struct vpfe_ccdc *ccdc,
+ struct vpfe_ccdc_config_params_raw *raw_params)
+{
+ struct vpfe_ccdc_config_params_raw *config_params =
+ &ccdc->ccdc_cfg.bayer.config_params;
+
+ *config_params = *raw_params;
+}
+
+/*
+ * vpfe_ccdc_restore_defaults()
+ * This function will write defaults to all CCDC registers
+ */
+static void vpfe_ccdc_restore_defaults(struct vpfe_ccdc *ccdc)
+{
+ int i;
+
+ /* Disable CCDC */
+ vpfe_pcr_enable(ccdc, 0);
+
+ /* set all registers to default value */
+ for (i = 4; i <= 0x94; i += 4)
+ vpfe_reg_write(ccdc, 0, i);
+
+ vpfe_reg_write(ccdc, VPFE_NO_CULLING, VPFE_CULLING);
+ vpfe_reg_write(ccdc, VPFE_CCDC_GAMMA_BITS_11_2, VPFE_ALAW);
+}
+
+static int vpfe_ccdc_close(struct vpfe_ccdc *ccdc, struct device *dev)
+{
+ int dma_cntl, i, pcr;
+
+ /* If the CCDC module is still busy wait for it to be done */
+ for (i = 0; i < 10; i++) {
+ usleep_range(5000, 6000);
+ pcr = vpfe_reg_read(ccdc, VPFE_PCR);
+ if (!pcr)
+ break;
+
+ /* make sure it is disabled */
+ vpfe_pcr_enable(ccdc, 0);
+ }
+
+ /* Disable CCDC by resetting all register to default POR values */
+ vpfe_ccdc_restore_defaults(ccdc);
+
+ /* If the DMA_CNTL overflow bit is set, clear it.
+ * It appears to take a while (~20 ms) for this to become quiescent.
+ */
+ for (i = 0; i < 10; i++) {
+ dma_cntl = vpfe_reg_read(ccdc, VPFE_DMA_CNTL);
+ if (!(dma_cntl & VPFE_DMA_CNTL_OVERFLOW))
+ break;
+
+ /* Clear the overflow bit */
+ vpfe_reg_write(ccdc, dma_cntl, VPFE_DMA_CNTL);
+ usleep_range(5000, 6000);
+ }
+
+ /* Disable the module at the CONFIG level */
+ vpfe_config_enable(ccdc, 0);
+
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
+static int vpfe_ccdc_set_params(struct vpfe_ccdc *ccdc, void __user *params)
+{
+ struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
+ struct vpfe_ccdc_config_params_raw raw_params;
+ int x;
+
+ if (ccdc->ccdc_cfg.if_type != VPFE_RAW_BAYER)
+ return -EINVAL;
+
+ x = copy_from_user(&raw_params, params, sizeof(raw_params));
+ if (x) {
+ vpfe_dbg(1, vpfe,
+ "vpfe_ccdc_set_params: error in copying ccdc params, %d\n",
+ x);
+ return -EFAULT;
+ }
+
+ if (!vpfe_ccdc_validate_param(ccdc, &raw_params)) {
+ vpfe_ccdc_update_raw_params(ccdc, &raw_params);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * vpfe_ccdc_config_ycbcr()
+ * This function will configure CCDC for YCbCr video capture
+ */
+static void vpfe_ccdc_config_ycbcr(struct vpfe_ccdc *ccdc)
+{
+ struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
+ struct ccdc_params_ycbcr *params = &ccdc->ccdc_cfg.ycbcr;
+ u32 syn_mode;
+
+ vpfe_dbg(3, vpfe, "vpfe_ccdc_config_ycbcr:\n");
+ /*
+ * First restore the CCDC registers to their default values.
+ * This is important since we assume default values to be set in
+ * a lot of registers that we didn't touch.
+ */
+ vpfe_ccdc_restore_defaults(ccdc);
+
+ /*
+ * configure pixel format, frame format, configure video frame
+ * format, enable output to SDRAM, enable internal timing generator
+ * and 8bit pack mode
+ */
+ syn_mode = (((params->pix_fmt & VPFE_SYN_MODE_INPMOD_MASK) <<
+ VPFE_SYN_MODE_INPMOD_SHIFT) |
+ ((params->frm_fmt & VPFE_SYN_FLDMODE_MASK) <<
+ VPFE_SYN_FLDMODE_SHIFT) | VPFE_VDHDEN_ENABLE |
+ VPFE_WEN_ENABLE | VPFE_DATA_PACK_ENABLE);
+
+ /* setup BT.656 sync mode */
+ if (params->bt656_enable) {
+ vpfe_reg_write(ccdc, VPFE_REC656IF_BT656_EN, VPFE_REC656IF);
+
+ /*
+ * configure the FID, VD, HD pin polarity,
+ * fld,hd pol positive, vd negative, 8-bit data
+ */
+ syn_mode |= VPFE_SYN_MODE_VD_POL_NEGATIVE;
+ if (ccdc->ccdc_cfg.if_type == VPFE_BT656_10BIT)
+ syn_mode |= VPFE_SYN_MODE_10BITS;
+ else
+ syn_mode |= VPFE_SYN_MODE_8BITS;
+ } else {
+ /* y/c external sync mode */
+ syn_mode |= (((params->fid_pol & VPFE_FID_POL_MASK) <<
+ VPFE_FID_POL_SHIFT) |
+ ((params->hd_pol & VPFE_HD_POL_MASK) <<
+ VPFE_HD_POL_SHIFT) |
+ ((params->vd_pol & VPFE_VD_POL_MASK) <<
+ VPFE_VD_POL_SHIFT));
+ }
+ vpfe_reg_write(ccdc, syn_mode, VPFE_SYNMODE);
+
+ /* configure video window */
+ vpfe_ccdc_setwin(ccdc, &params->win,
+ params->frm_fmt, params->bytesperpixel);
+
+ /*
+ * configure the order of y cb cr in SDRAM, and disable latch
+ * internal register on vsync
+ */
+ if (ccdc->ccdc_cfg.if_type == VPFE_BT656_10BIT)
+ vpfe_reg_write(ccdc,
+ (params->pix_order << VPFE_CCDCFG_Y8POS_SHIFT) |
+ VPFE_LATCH_ON_VSYNC_DISABLE |
+ VPFE_CCDCFG_BW656_10BIT, VPFE_CCDCFG);
+ else
+ vpfe_reg_write(ccdc,
+ (params->pix_order << VPFE_CCDCFG_Y8POS_SHIFT) |
+ VPFE_LATCH_ON_VSYNC_DISABLE, VPFE_CCDCFG);
+
+ /*
+ * configure the horizontal line offset. This should be on a
+ * 32 byte boundary, so the 5 LSBs are cleared
+ */
+ vpfe_reg_write(ccdc, params->bytesperline, VPFE_HSIZE_OFF);
+
+ /* configure the memory line offset */
+ if (params->buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED)
+ /* two fields are interleaved in memory */
+ vpfe_reg_write(ccdc, VPFE_SDOFST_FIELD_INTERLEAVED,
+ VPFE_SDOFST);
+}
+
+static void
+vpfe_ccdc_config_black_clamp(struct vpfe_ccdc *ccdc,
+ struct vpfe_ccdc_black_clamp *bclamp)
+{
+ u32 val;
+
+ if (!bclamp->enable) {
+ /* configure DCSub */
+ val = (bclamp->dc_sub) & VPFE_BLK_DC_SUB_MASK;
+ vpfe_reg_write(ccdc, val, VPFE_DCSUB);
+ vpfe_reg_write(ccdc, VPFE_CLAMP_DEFAULT_VAL, VPFE_CLAMP);
+ return;
+ }
+ /*
+ * Configure the gain, start pixel, number of lines to be averaged,
+ * number of pixels/line to be averaged, and enable black clamping
+ */
+ val = ((bclamp->sgain & VPFE_BLK_SGAIN_MASK) |
+ ((bclamp->start_pixel & VPFE_BLK_ST_PXL_MASK) <<
+ VPFE_BLK_ST_PXL_SHIFT) |
+ ((bclamp->sample_ln & VPFE_BLK_SAMPLE_LINE_MASK) <<
+ VPFE_BLK_SAMPLE_LINE_SHIFT) |
+ ((bclamp->sample_pixel & VPFE_BLK_SAMPLE_LN_MASK) <<
+ VPFE_BLK_SAMPLE_LN_SHIFT) | VPFE_BLK_CLAMP_ENABLE);
+ vpfe_reg_write(ccdc, val, VPFE_CLAMP);
+ /* If black clamping is enabled then make dcsub 0 */
+ vpfe_reg_write(ccdc, VPFE_DCSUB_DEFAULT_VAL, VPFE_DCSUB);
+}
+
+static void
+vpfe_ccdc_config_black_compense(struct vpfe_ccdc *ccdc,
+ struct vpfe_ccdc_black_compensation *bcomp)
+{
+ u32 val;
+
+ val = ((bcomp->b & VPFE_BLK_COMP_MASK) |
+ ((bcomp->gb & VPFE_BLK_COMP_MASK) <<
+ VPFE_BLK_COMP_GB_COMP_SHIFT) |
+ ((bcomp->gr & VPFE_BLK_COMP_MASK) <<
+ VPFE_BLK_COMP_GR_COMP_SHIFT) |
+ ((bcomp->r & VPFE_BLK_COMP_MASK) <<
+ VPFE_BLK_COMP_R_COMP_SHIFT));
+ vpfe_reg_write(ccdc, val, VPFE_BLKCMP);
+}
+
+/*
+ * vpfe_ccdc_config_raw()
+ * This function will configure CCDC for Raw capture mode
+ */
+static void vpfe_ccdc_config_raw(struct vpfe_ccdc *ccdc)
+{
+ struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
+ struct vpfe_ccdc_config_params_raw *config_params =
+ &ccdc->ccdc_cfg.bayer.config_params;
+ struct ccdc_params_raw *params = &ccdc->ccdc_cfg.bayer;
+ unsigned int syn_mode;
+ unsigned int val;
+
+ vpfe_dbg(3, vpfe, "vpfe_ccdc_config_raw:\n");
+
+ /* Reset CCDC */
+ vpfe_ccdc_restore_defaults(ccdc);
+
+ /* Disable latching function registers on VSYNC */
+ vpfe_reg_write(ccdc, VPFE_LATCH_ON_VSYNC_DISABLE, VPFE_CCDCFG);
+
+ /*
+ * Configure the vertical sync polarity(SYN_MODE.VDPOL),
+ * horizontal sync polarity (SYN_MODE.HDPOL), frame id polarity
+ * (SYN_MODE.FLDPOL), frame format(progressive or interlace),
+ * data size(SYNMODE.DATSIZ), &pixel format (Input mode), output
+ * SDRAM, enable internal timing generator
+ */
+ syn_mode = (((params->vd_pol & VPFE_VD_POL_MASK) << VPFE_VD_POL_SHIFT) |
+ ((params->hd_pol & VPFE_HD_POL_MASK) << VPFE_HD_POL_SHIFT) |
+ ((params->fid_pol & VPFE_FID_POL_MASK) <<
+ VPFE_FID_POL_SHIFT) | ((params->frm_fmt &
+ VPFE_FRM_FMT_MASK) << VPFE_FRM_FMT_SHIFT) |
+ ((config_params->data_sz & VPFE_DATA_SZ_MASK) <<
+ VPFE_DATA_SZ_SHIFT) | ((params->pix_fmt &
+ VPFE_PIX_FMT_MASK) << VPFE_PIX_FMT_SHIFT) |
+ VPFE_WEN_ENABLE | VPFE_VDHDEN_ENABLE);
+
+ /* Enable and configure aLaw register if needed */
+ if (config_params->alaw.enable) {
+ val = ((config_params->alaw.gamma_wd &
+ VPFE_ALAW_GAMMA_WD_MASK) | VPFE_ALAW_ENABLE);
+ vpfe_reg_write(ccdc, val, VPFE_ALAW);
+ vpfe_dbg(3, vpfe, "\nWriting 0x%x to ALAW...\n", val);
+ }
+
+ /* Configure video window */
+ vpfe_ccdc_setwin(ccdc, &params->win, params->frm_fmt,
+ params->bytesperpixel);
+
+ /* Configure Black Clamp */
+ vpfe_ccdc_config_black_clamp(ccdc, &config_params->blk_clamp);
+
+ /* Configure Black level compensation */
+ vpfe_ccdc_config_black_compense(ccdc, &config_params->blk_comp);
+
+ /* If data size is 8 bit then pack the data */
+ if ((config_params->data_sz == VPFE_CCDC_DATA_8BITS) ||
+ config_params->alaw.enable)
+ syn_mode |= VPFE_DATA_PACK_ENABLE;
+
+ /*
+ * Configure Horizontal offset register. If pack 8 is enabled then
+ * 1 pixel will take 1 byte
+ */
+ vpfe_reg_write(ccdc, params->bytesperline, VPFE_HSIZE_OFF);
+
+ vpfe_dbg(3, vpfe, "Writing %d (%x) to HSIZE_OFF\n",
+ params->bytesperline, params->bytesperline);
+
+ /* Set value for SDOFST */
+ if (params->frm_fmt == CCDC_FRMFMT_INTERLACED) {
+ if (params->image_invert_enable) {
+ /* For interlace inverse mode */
+ vpfe_reg_write(ccdc, VPFE_INTERLACED_IMAGE_INVERT,
+ VPFE_SDOFST);
+ } else {
+ /* For interlace non inverse mode */
+ vpfe_reg_write(ccdc, VPFE_INTERLACED_NO_IMAGE_INVERT,
+ VPFE_SDOFST);
+ }
+ } else if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE) {
+ vpfe_reg_write(ccdc, VPFE_PROGRESSIVE_NO_IMAGE_INVERT,
+ VPFE_SDOFST);
+ }
+
+ vpfe_reg_write(ccdc, syn_mode, VPFE_SYNMODE);
+
+ vpfe_reg_dump(ccdc);
+}
+
+static inline int
+vpfe_ccdc_set_buftype(struct vpfe_ccdc *ccdc,
+ enum ccdc_buftype buf_type)
+{
+ if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ ccdc->ccdc_cfg.bayer.buf_type = buf_type;
+ else
+ ccdc->ccdc_cfg.ycbcr.buf_type = buf_type;
+
+ return 0;
+}
+
+static inline enum ccdc_buftype vpfe_ccdc_get_buftype(struct vpfe_ccdc *ccdc)
+{
+ if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ return ccdc->ccdc_cfg.bayer.buf_type;
+
+ return ccdc->ccdc_cfg.ycbcr.buf_type;
+}
+
+static int vpfe_ccdc_set_pixel_format(struct vpfe_ccdc *ccdc, u32 pixfmt)
+{
+ struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
+
+ vpfe_dbg(1, vpfe, "vpfe_ccdc_set_pixel_format: if_type: %d, pixfmt:%s\n",
+ ccdc->ccdc_cfg.if_type, print_fourcc(pixfmt));
+
+ if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) {
+ ccdc->ccdc_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
+ /*
+ * Need to clear it in case it was left on
+ * after the last capture.
+ */
+ ccdc->ccdc_cfg.bayer.config_params.alaw.enable = 0;
+
+ switch (pixfmt) {
+ case V4L2_PIX_FMT_SBGGR8:
+ ccdc->ccdc_cfg.bayer.config_params.alaw.enable = 1;
+ break;
+
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_RGB565X:
+ break;
+
+ case V4L2_PIX_FMT_SBGGR16:
+ default:
+ return -EINVAL;
+ }
+ } else {
+ switch (pixfmt) {
+ case V4L2_PIX_FMT_YUYV:
+ ccdc->ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_YCBYCR;
+ break;
+
+ case V4L2_PIX_FMT_UYVY:
+ ccdc->ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static u32 vpfe_ccdc_get_pixel_format(struct vpfe_ccdc *ccdc)
+{
+ u32 pixfmt;
+
+ if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) {
+ pixfmt = V4L2_PIX_FMT_YUYV;
+ } else {
+ if (ccdc->ccdc_cfg.ycbcr.pix_order == CCDC_PIXORDER_YCBYCR)
+ pixfmt = V4L2_PIX_FMT_YUYV;
+ else
+ pixfmt = V4L2_PIX_FMT_UYVY;
+ }
+
+ return pixfmt;
+}
+
+static int
+vpfe_ccdc_set_image_window(struct vpfe_ccdc *ccdc,
+ struct v4l2_rect *win, unsigned int bpp)
+{
+ if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) {
+ ccdc->ccdc_cfg.bayer.win = *win;
+ ccdc->ccdc_cfg.bayer.bytesperpixel = bpp;
+ ccdc->ccdc_cfg.bayer.bytesperline = ALIGN(win->width * bpp, 32);
+ } else {
+ ccdc->ccdc_cfg.ycbcr.win = *win;
+ ccdc->ccdc_cfg.ycbcr.bytesperpixel = bpp;
+ ccdc->ccdc_cfg.ycbcr.bytesperline = ALIGN(win->width * bpp, 32);
+ }
+
+ return 0;
+}
+
+static inline void
+vpfe_ccdc_get_image_window(struct vpfe_ccdc *ccdc,
+ struct v4l2_rect *win)
+{
+ if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ *win = ccdc->ccdc_cfg.bayer.win;
+ else
+ *win = ccdc->ccdc_cfg.ycbcr.win;
+}
+
+static inline unsigned int vpfe_ccdc_get_line_length(struct vpfe_ccdc *ccdc)
+{
+ if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ return ccdc->ccdc_cfg.bayer.bytesperline;
+
+ return ccdc->ccdc_cfg.ycbcr.bytesperline;
+}
+
+static inline int
+vpfe_ccdc_set_frame_format(struct vpfe_ccdc *ccdc,
+ enum ccdc_frmfmt frm_fmt)
+{
+ if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ ccdc->ccdc_cfg.bayer.frm_fmt = frm_fmt;
+ else
+ ccdc->ccdc_cfg.ycbcr.frm_fmt = frm_fmt;
+
+ return 0;
+}
+
+static inline enum ccdc_frmfmt
+vpfe_ccdc_get_frame_format(struct vpfe_ccdc *ccdc)
+{
+ if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ return ccdc->ccdc_cfg.bayer.frm_fmt;
+
+ return ccdc->ccdc_cfg.ycbcr.frm_fmt;
+}
+
+static inline int vpfe_ccdc_getfid(struct vpfe_ccdc *ccdc)
+{
+ return (vpfe_reg_read(ccdc, VPFE_SYNMODE) >> 15) & 1;
+}
+
+static inline void vpfe_set_sdr_addr(struct vpfe_ccdc *ccdc, unsigned long addr)
+{
+ vpfe_reg_write(ccdc, addr & 0xffffffe0, VPFE_SDR_ADDR);
+}
+
+static int vpfe_ccdc_set_hw_if_params(struct vpfe_ccdc *ccdc,
+ struct vpfe_hw_if_param *params)
+{
+ struct vpfe_device *vpfe = container_of(ccdc, struct vpfe_device, ccdc);
+
+ ccdc->ccdc_cfg.if_type = params->if_type;
+
+ switch (params->if_type) {
+ case VPFE_BT656:
+ case VPFE_YCBCR_SYNC_16:
+ case VPFE_YCBCR_SYNC_8:
+ case VPFE_BT656_10BIT:
+ ccdc->ccdc_cfg.ycbcr.vd_pol = params->vdpol;
+ ccdc->ccdc_cfg.ycbcr.hd_pol = params->hdpol;
+ break;
+
+ case VPFE_RAW_BAYER:
+ ccdc->ccdc_cfg.bayer.vd_pol = params->vdpol;
+ ccdc->ccdc_cfg.bayer.hd_pol = params->hdpol;
+ if (params->bus_width == 10)
+ ccdc->ccdc_cfg.bayer.config_params.data_sz =
+ VPFE_CCDC_DATA_10BITS;
+ else
+ ccdc->ccdc_cfg.bayer.config_params.data_sz =
+ VPFE_CCDC_DATA_8BITS;
+ vpfe_dbg(1, vpfe, "params.bus_width: %d\n",
+ params->bus_width);
+ vpfe_dbg(1, vpfe, "config_params.data_sz: %d\n",
+ ccdc->ccdc_cfg.bayer.config_params.data_sz);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void vpfe_clear_intr(struct vpfe_ccdc *ccdc, int vdint)
+{
+ unsigned int vpfe_int_status;
+
+ vpfe_int_status = vpfe_reg_read(ccdc, VPFE_IRQ_STS);
+
+ switch (vdint) {
+ /* VD0 interrupt */
+ case VPFE_VDINT0:
+ vpfe_int_status &= ~VPFE_VDINT0;
+ vpfe_int_status |= VPFE_VDINT0;
+ break;
+
+ /* VD1 interrupt */
+ case VPFE_VDINT1:
+ vpfe_int_status &= ~VPFE_VDINT1;
+ vpfe_int_status |= VPFE_VDINT1;
+ break;
+
+ /* VD2 interrupt */
+ case VPFE_VDINT2:
+ vpfe_int_status &= ~VPFE_VDINT2;
+ vpfe_int_status |= VPFE_VDINT2;
+ break;
+
+ /* Clear all interrupts */
+ default:
+ vpfe_int_status &= ~(VPFE_VDINT0 |
+ VPFE_VDINT1 |
+ VPFE_VDINT2);
+ vpfe_int_status |= (VPFE_VDINT0 |
+ VPFE_VDINT1 |
+ VPFE_VDINT2);
+ break;
+ }
+ /* Clear specific VDINT from the status register */
+ vpfe_reg_write(ccdc, vpfe_int_status, VPFE_IRQ_STS);
+
+ vpfe_int_status = vpfe_reg_read(ccdc, VPFE_IRQ_STS);
+
+ /* Acknowledge that we are done with all interrupts */
+ vpfe_reg_write(ccdc, 1, VPFE_IRQ_EOI);
+}
+
+static void vpfe_ccdc_config_defaults(struct vpfe_ccdc *ccdc)
+{
+ ccdc->ccdc_cfg.if_type = VPFE_RAW_BAYER;
+
+ ccdc->ccdc_cfg.ycbcr.pix_fmt = CCDC_PIXFMT_YCBCR_8BIT;
+ ccdc->ccdc_cfg.ycbcr.frm_fmt = CCDC_FRMFMT_INTERLACED;
+ ccdc->ccdc_cfg.ycbcr.fid_pol = VPFE_PINPOL_POSITIVE;
+ ccdc->ccdc_cfg.ycbcr.vd_pol = VPFE_PINPOL_POSITIVE;
+ ccdc->ccdc_cfg.ycbcr.hd_pol = VPFE_PINPOL_POSITIVE;
+ ccdc->ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
+ ccdc->ccdc_cfg.ycbcr.buf_type = CCDC_BUFTYPE_FLD_INTERLEAVED;
+
+ ccdc->ccdc_cfg.ycbcr.win.left = 0;
+ ccdc->ccdc_cfg.ycbcr.win.top = 0;
+ ccdc->ccdc_cfg.ycbcr.win.width = 720;
+ ccdc->ccdc_cfg.ycbcr.win.height = 576;
+ ccdc->ccdc_cfg.ycbcr.bt656_enable = 1;
+
+ ccdc->ccdc_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
+ ccdc->ccdc_cfg.bayer.frm_fmt = CCDC_FRMFMT_PROGRESSIVE;
+ ccdc->ccdc_cfg.bayer.fid_pol = VPFE_PINPOL_POSITIVE;
+ ccdc->ccdc_cfg.bayer.vd_pol = VPFE_PINPOL_POSITIVE;
+ ccdc->ccdc_cfg.bayer.hd_pol = VPFE_PINPOL_POSITIVE;
+
+ ccdc->ccdc_cfg.bayer.win.left = 0;
+ ccdc->ccdc_cfg.bayer.win.top = 0;
+ ccdc->ccdc_cfg.bayer.win.width = 800;
+ ccdc->ccdc_cfg.bayer.win.height = 600;
+ ccdc->ccdc_cfg.bayer.config_params.data_sz = VPFE_CCDC_DATA_8BITS;
+ ccdc->ccdc_cfg.bayer.config_params.alaw.gamma_wd =
+ VPFE_CCDC_GAMMA_BITS_09_0;
+}
+
+/*
+ * vpfe_get_ccdc_image_format - Get image parameters based on CCDC settings
+ */
+static int vpfe_get_ccdc_image_format(struct vpfe_device *vpfe,
+ struct v4l2_format *f)
+{
+ struct v4l2_rect image_win;
+ enum ccdc_buftype buf_type;
+ enum ccdc_frmfmt frm_fmt;
+
+ memset(f, 0, sizeof(*f));
+ f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vpfe_ccdc_get_image_window(&vpfe->ccdc, &image_win);
+ f->fmt.pix.width = image_win.width;
+ f->fmt.pix.height = image_win.height;
+ f->fmt.pix.bytesperline = vpfe_ccdc_get_line_length(&vpfe->ccdc);
+ f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
+ f->fmt.pix.height;
+ buf_type = vpfe_ccdc_get_buftype(&vpfe->ccdc);
+ f->fmt.pix.pixelformat = vpfe_ccdc_get_pixel_format(&vpfe->ccdc);
+ frm_fmt = vpfe_ccdc_get_frame_format(&vpfe->ccdc);
+
+ if (frm_fmt == CCDC_FRMFMT_PROGRESSIVE) {
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+ } else if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
+ if (buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED) {
+ f->fmt.pix.field = V4L2_FIELD_INTERLACED;
+ } else if (buf_type == CCDC_BUFTYPE_FLD_SEPARATED) {
+ f->fmt.pix.field = V4L2_FIELD_SEQ_TB;
+ } else {
+ vpfe_err(vpfe, "Invalid buf_type\n");
+ return -EINVAL;
+ }
+ } else {
+ vpfe_err(vpfe, "Invalid frm_fmt\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int vpfe_config_ccdc_image_format(struct vpfe_device *vpfe)
+{
+ enum ccdc_frmfmt frm_fmt = CCDC_FRMFMT_INTERLACED;
+ int ret;
+
+ vpfe_dbg(2, vpfe, "vpfe_config_ccdc_image_format\n");
+
+ vpfe_dbg(1, vpfe, "pixelformat: %s\n",
+ print_fourcc(vpfe->fmt.fmt.pix.pixelformat));
+
+ if (vpfe_ccdc_set_pixel_format(&vpfe->ccdc,
+ vpfe->fmt.fmt.pix.pixelformat) < 0) {
+ vpfe_err(vpfe, "couldn't set pix format in ccdc\n");
+ return -EINVAL;
+ }
+
+ /* configure the image window */
+ vpfe_ccdc_set_image_window(&vpfe->ccdc, &vpfe->crop, vpfe->bpp);
+
+ switch (vpfe->fmt.fmt.pix.field) {
+ case V4L2_FIELD_INTERLACED:
+ /* do nothing, since it is default */
+ ret = vpfe_ccdc_set_buftype(
+ &vpfe->ccdc,
+ CCDC_BUFTYPE_FLD_INTERLEAVED);
+ break;
+
+ case V4L2_FIELD_NONE:
+ frm_fmt = CCDC_FRMFMT_PROGRESSIVE;
+ /* buffer type only applicable for interlaced scan */
+ break;
+
+ case V4L2_FIELD_SEQ_TB:
+ ret = vpfe_ccdc_set_buftype(
+ &vpfe->ccdc,
+ CCDC_BUFTYPE_FLD_SEPARATED);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ return vpfe_ccdc_set_frame_format(&vpfe->ccdc, frm_fmt);
+}
+
+/*
+ * vpfe_config_image_format()
+ * For a given standard, this function sets up the default
+ * pix format & crop values in the vpfe device and ccdc. It first
+ * starts with default values based on the standard table.
+ * It then checks if the sub device supports g_mbus_fmt and overrides the
+ * values based on that. It sets crop values to match the scan resolution
+ * starting at 0,0, and calls vpfe_config_ccdc_image_format() to set the
+ * values in the ccdc.
+ */
+static int vpfe_config_image_format(struct vpfe_device *vpfe,
+ v4l2_std_id std_id)
+{
+ struct v4l2_pix_format *pix = &vpfe->fmt.fmt.pix;
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(vpfe_standards); i++) {
+ if (vpfe_standards[i].std_id & std_id) {
+ vpfe->std_info.active_pixels =
+ vpfe_standards[i].width;
+ vpfe->std_info.active_lines =
+ vpfe_standards[i].height;
+ vpfe->std_info.frame_format =
+ vpfe_standards[i].frame_format;
+ vpfe->std_index = i;
+
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(vpfe_standards)) {
+ vpfe_err(vpfe, "standard not supported\n");
+ return -EINVAL;
+ }
+
+ vpfe->crop.top = vpfe->crop.left = 0;
+ vpfe->crop.width = vpfe->std_info.active_pixels;
+ vpfe->crop.height = vpfe->std_info.active_lines;
+ pix->width = vpfe->crop.width;
+ pix->height = vpfe->crop.height;
+ pix->pixelformat = V4L2_PIX_FMT_YUYV;
+
+ /* first field and frame format based on standard frame format */
+ if (vpfe->std_info.frame_format)
+ pix->field = V4L2_FIELD_INTERLACED;
+ else
+ pix->field = V4L2_FIELD_NONE;
+
+ ret = __vpfe_get_format(vpfe, &vpfe->fmt, &vpfe->bpp);
+ if (ret)
+ return ret;
+
+ /* Update the crop window based on found values */
+ vpfe->crop.width = pix->width;
+ vpfe->crop.height = pix->height;
+
+ return vpfe_config_ccdc_image_format(vpfe);
+}
+
+static int vpfe_initialize_device(struct vpfe_device *vpfe)
+{
+ struct vpfe_subdev_info *sdinfo;
+ int ret;
+
+ sdinfo = &vpfe->cfg->sub_devs[0];
+ sdinfo->sd = vpfe->sd[0];
+ vpfe->current_input = 0;
+ vpfe->std_index = 0;
+ /* Configure the default format information */
+ ret = vpfe_config_image_format(vpfe,
+ vpfe_standards[vpfe->std_index].std_id);
+ if (ret)
+ return ret;
+
+ pm_runtime_get_sync(vpfe->pdev);
+
+ vpfe_config_enable(&vpfe->ccdc, 1);
+
+ vpfe_ccdc_restore_defaults(&vpfe->ccdc);
+
+ /* Clear all VPFE interrupts */
+ vpfe_clear_intr(&vpfe->ccdc, -1);
+
+ return ret;
+}
+
+/*
+ * vpfe_release : This function is based on the vb2_fop_release
+ * helper function.
+ * It has been augmented to handle module power management,
+ * by disabling/enabling h/w module fcntl clock when necessary.
+ */
+static int vpfe_release(struct file *file)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+ int ret;
+
+ mutex_lock(&vpfe->lock);
+
+ if (v4l2_fh_is_singular_file(file))
+ vpfe_ccdc_close(&vpfe->ccdc, vpfe->pdev);
+ ret = _vb2_fop_release(file, NULL);
+
+ mutex_unlock(&vpfe->lock);
+
+ return ret;
+}
+
+/*
+ * vpfe_open : This function is based on the v4l2_fh_open helper function.
+ * It has been augmented to handle module power management,
+ * by disabling/enabling h/w module fcntl clock when necessary.
+ */
+static int vpfe_open(struct file *file)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+ int ret;
+
+ mutex_lock(&vpfe->lock);
+
+ ret = v4l2_fh_open(file);
+ if (ret) {
+ vpfe_err(vpfe, "v4l2_fh_open failed\n");
+ goto unlock;
+ }
+
+ if (!v4l2_fh_is_singular_file(file))
+ goto unlock;
+
+ if (vpfe_initialize_device(vpfe)) {
+ v4l2_fh_release(file);
+ ret = -ENODEV;
+ }
+
+unlock:
+ mutex_unlock(&vpfe->lock);
+ return ret;
+}
+
+/**
+ * vpfe_schedule_next_buffer: set next buffer address for capture
+ * @vpfe : ptr to vpfe device
+ *
+ * This function will get next buffer from the dma queue and
+ * set the buffer address in the vpfe register for capture.
+ * the buffer is marked active
+ *
+ * Assumes caller is holding vpfe->dma_queue_lock already
+ */
+static inline void vpfe_schedule_next_buffer(struct vpfe_device *vpfe)
+{
+ vpfe->next_frm = list_entry(vpfe->dma_queue.next,
+ struct vpfe_cap_buffer, list);
+ list_del(&vpfe->next_frm->list);
+
+ vpfe_set_sdr_addr(&vpfe->ccdc,
+ vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb, 0));
+}
+
+static inline void vpfe_schedule_bottom_field(struct vpfe_device *vpfe)
+{
+ unsigned long addr;
+
+ addr = vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb, 0) +
+ vpfe->field_off;
+
+ vpfe_set_sdr_addr(&vpfe->ccdc, addr);
+}
+
+/*
+ * vpfe_process_buffer_complete: process a completed buffer
+ * @vpfe : ptr to vpfe device
+ *
+ * This function timestamps the buffer and marks it as DONE. It also
+ * wakes up any process waiting on the QUEUE and sets the next buffer
+ * as current
+ */
+static inline void vpfe_process_buffer_complete(struct vpfe_device *vpfe)
+{
+ v4l2_get_timestamp(&vpfe->cur_frm->vb.v4l2_buf.timestamp);
+ vpfe->cur_frm->vb.v4l2_buf.field = vpfe->fmt.fmt.pix.field;
+ vpfe->cur_frm->vb.v4l2_buf.sequence = vpfe->sequence++;
+ vb2_buffer_done(&vpfe->cur_frm->vb, VB2_BUF_STATE_DONE);
+ vpfe->cur_frm = vpfe->next_frm;
+}
+
+/*
+ * vpfe_isr : ISR handler for vpfe capture (VINT0)
+ * @irq: irq number
+ * @dev: ptr to the vpfe device
+ *
+ * It changes status of the captured buffer, takes next buffer from the queue
+ * and sets its address in VPFE registers
+ */
+static irqreturn_t vpfe_isr(int irq, void *dev)
+{
+ struct vpfe_device *vpfe = (struct vpfe_device *)dev;
+ enum v4l2_field field;
+ int intr_status;
+ int fid;
+
+ intr_status = vpfe_reg_read(&vpfe->ccdc, VPFE_IRQ_STS);
+
+ if (intr_status & VPFE_VDINT0) {
+ field = vpfe->fmt.fmt.pix.field;
+
+ if (field == V4L2_FIELD_NONE) {
+ /* handle progressive frame capture */
+ if (vpfe->cur_frm != vpfe->next_frm)
+ vpfe_process_buffer_complete(vpfe);
+ goto next_intr;
+ }
+
+ /* interlaced or TB capture: check which field the hardware is in */
+ fid = vpfe_ccdc_getfid(&vpfe->ccdc);
+
+ /* switch the software maintained field id */
+ vpfe->field ^= 1;
+ if (fid == vpfe->field) {
+ /* we are in sync here, continue */
+ if (fid == 0) {
+ /*
+ * One frame is just being captured. If the
+ * next frame is available, release the
+ * current frame and move on
+ */
+ if (vpfe->cur_frm != vpfe->next_frm)
+ vpfe_process_buffer_complete(vpfe);
+ /*
+ * based on whether the two fields are stored
+ * interleaved or separately in memory,
+ * reconfigure the CCDC memory address
+ */
+ if (field == V4L2_FIELD_SEQ_TB)
+ vpfe_schedule_bottom_field(vpfe);
+
+ goto next_intr;
+ }
+ /*
+ * if one field has just been captured, configure
+ * the next frame: get the next frame from the empty
+ * queue; if no frame is available, hold on to the
+ * current buffer
+ */
+ spin_lock(&vpfe->dma_queue_lock);
+ if (!list_empty(&vpfe->dma_queue) &&
+ vpfe->cur_frm == vpfe->next_frm)
+ vpfe_schedule_next_buffer(vpfe);
+ spin_unlock(&vpfe->dma_queue_lock);
+ } else if (fid == 0) {
+ /*
+ * out of sync. Recover from any hardware out-of-sync.
+ * May lose one frame
+ */
+ vpfe->field = fid;
+ }
+ }
+
+next_intr:
+ if (intr_status & VPFE_VDINT1) {
+ spin_lock(&vpfe->dma_queue_lock);
+ if (vpfe->fmt.fmt.pix.field == V4L2_FIELD_NONE &&
+ !list_empty(&vpfe->dma_queue) &&
+ vpfe->cur_frm == vpfe->next_frm)
+ vpfe_schedule_next_buffer(vpfe);
+ spin_unlock(&vpfe->dma_queue_lock);
+ }
+
+ vpfe_clear_intr(&vpfe->ccdc, intr_status);
+
+ return IRQ_HANDLED;
+}
+
+static inline void vpfe_detach_irq(struct vpfe_device *vpfe)
+{
+ unsigned int intr = VPFE_VDINT0;
+ enum ccdc_frmfmt frame_format;
+
+ frame_format = vpfe_ccdc_get_frame_format(&vpfe->ccdc);
+ if (frame_format == CCDC_FRMFMT_PROGRESSIVE)
+ intr |= VPFE_VDINT1;
+
+ vpfe_reg_write(&vpfe->ccdc, intr, VPFE_IRQ_EN_CLR);
+}
+
+static inline void vpfe_attach_irq(struct vpfe_device *vpfe)
+{
+ unsigned int intr = VPFE_VDINT0;
+ enum ccdc_frmfmt frame_format;
+
+ frame_format = vpfe_ccdc_get_frame_format(&vpfe->ccdc);
+ if (frame_format == CCDC_FRMFMT_PROGRESSIVE)
+ intr |= VPFE_VDINT1;
+
+ vpfe_reg_write(&vpfe->ccdc, intr, VPFE_IRQ_EN_SET);
+}
+
+static int vpfe_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+
+ vpfe_dbg(2, vpfe, "vpfe_querycap\n");
+
+ strlcpy(cap->driver, VPFE_MODULE_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, "TI AM437x VPFE", sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info),
+ "platform:%s", vpfe->v4l2_dev.name);
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
+ V4L2_CAP_READWRITE;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
+ return 0;
+}
+
+/* get the format set at output pad of the adjacent subdev */
+static int __vpfe_get_format(struct vpfe_device *vpfe,
+ struct v4l2_format *format, unsigned int *bpp)
+{
+ struct v4l2_mbus_framefmt mbus_fmt;
+ struct vpfe_subdev_info *sdinfo;
+ struct v4l2_subdev_format fmt;
+ int ret;
+
+ sdinfo = vpfe->current_subdev;
+ if (!sdinfo->sd)
+ return -EINVAL;
+
+ fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ fmt.pad = 0;
+
+ ret = v4l2_subdev_call(sdinfo->sd, pad, get_fmt, NULL, &fmt);
+ if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV)
+ return ret;
+
+ if (!ret) {
+ v4l2_fill_pix_format(&format->fmt.pix, &fmt.format);
+ mbus_to_pix(vpfe, &fmt.format, &format->fmt.pix, bpp);
+ } else {
+ ret = v4l2_device_call_until_err(&vpfe->v4l2_dev,
+ sdinfo->grp_id,
+ video, g_mbus_fmt,
+ &mbus_fmt);
+ if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV)
+ return ret;
+ v4l2_fill_pix_format(&format->fmt.pix, &mbus_fmt);
+ mbus_to_pix(vpfe, &mbus_fmt, &format->fmt.pix, bpp);
+ }
+
+ format->type = vpfe->fmt.type;
+
+ vpfe_dbg(1, vpfe,
+ "%s size %dx%d (%s) bytesperline = %d, size = %d, bpp = %d\n",
+ __func__, format->fmt.pix.width, format->fmt.pix.height,
+ print_fourcc(format->fmt.pix.pixelformat),
+ format->fmt.pix.bytesperline, format->fmt.pix.sizeimage, *bpp);
+
+ return 0;
+}
+
+/* set the format at output pad of the adjacent subdev */
+static int __vpfe_set_format(struct vpfe_device *vpfe,
+ struct v4l2_format *format, unsigned int *bpp)
+{
+ struct v4l2_mbus_framefmt mbus_fmt;
+ struct vpfe_subdev_info *sdinfo;
+ struct v4l2_subdev_format fmt;
+ int ret;
+
+ vpfe_dbg(2, vpfe, "__vpfe_set_format\n");
+
+ sdinfo = vpfe->current_subdev;
+ if (!sdinfo->sd)
+ return -EINVAL;
+
+ fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ fmt.pad = 0;
+
+ pix_to_mbus(vpfe, &format->fmt.pix, &fmt.format);
+
+ ret = v4l2_subdev_call(sdinfo->sd, pad, set_fmt, NULL, &fmt);
+ if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV)
+ return ret;
+
+ if (!ret) {
+ v4l2_fill_pix_format(&format->fmt.pix, &fmt.format);
+ mbus_to_pix(vpfe, &fmt.format, &format->fmt.pix, bpp);
+ } else {
+ ret = v4l2_device_call_until_err(&vpfe->v4l2_dev,
+ sdinfo->grp_id,
+ video, s_mbus_fmt,
+ &mbus_fmt);
+ if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV)
+ return ret;
+
+ v4l2_fill_pix_format(&format->fmt.pix, &mbus_fmt);
+ mbus_to_pix(vpfe, &mbus_fmt, &format->fmt.pix, bpp);
+ }
+
+ format->type = vpfe->fmt.type;
+
+ vpfe_dbg(1, vpfe,
+ "%s size %dx%d (%s) bytesperline = %d, size = %d, bpp = %d\n",
+ __func__, format->fmt.pix.width, format->fmt.pix.height,
+ print_fourcc(format->fmt.pix.pixelformat),
+ format->fmt.pix.bytesperline, format->fmt.pix.sizeimage, *bpp);
+
+ return 0;
+}
+
+static int vpfe_g_fmt(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+
+ vpfe_dbg(2, vpfe, "vpfe_g_fmt\n");
+
+ *fmt = vpfe->fmt;
+
+ return 0;
+}
+
+static int vpfe_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+ struct vpfe_subdev_info *sdinfo;
+ struct vpfe_fmt *fmt = NULL;
+ unsigned int k;
+
+ vpfe_dbg(2, vpfe, "vpfe_enum_format index:%d\n",
+ f->index);
+
+ sdinfo = vpfe->current_subdev;
+ if (!sdinfo->sd)
+ return -EINVAL;
+
+ if (f->index > ARRAY_SIZE(formats))
+ return -EINVAL;
+
+ for (k = 0; k < ARRAY_SIZE(formats); k++) {
+ if (formats[k].index == f->index) {
+ fmt = &formats[k];
+ break;
+ }
+ }
+ if (!fmt)
+ return -EINVAL;
+
+ strncpy(f->description, fmt->name, sizeof(f->description) - 1);
+ f->pixelformat = fmt->fourcc;
+ f->type = vpfe->fmt.type;
+
+ vpfe_dbg(1, vpfe, "vpfe_enum_format: mbus index: %d code: %x pixelformat: %s [%s]\n",
+ f->index, fmt->code, print_fourcc(fmt->fourcc), fmt->name);
+
+ return 0;
+}
+
+static int vpfe_try_fmt(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+ unsigned int bpp;
+
+ vpfe_dbg(2, vpfe, "vpfe_try_fmt\n");
+
+ return __vpfe_get_format(vpfe, fmt, &bpp);
+}
+
+static int vpfe_s_fmt(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+ struct v4l2_format format;
+ unsigned int bpp;
+ int ret;
+
+ vpfe_dbg(2, vpfe, "vpfe_s_fmt\n");
+
+ /* If streaming is started, return error */
+ if (vb2_is_busy(&vpfe->buffer_queue)) {
+ vpfe_err(vpfe, "%s device busy\n", __func__);
+ return -EBUSY;
+ }
+
+ ret = vpfe_try_fmt(file, priv, fmt);
+ if (ret)
+ return ret;
+
+ /* Query the current subdev format so we can compare against it */
+ ret = __vpfe_get_format(vpfe, &format, &bpp);
+ if (ret)
+ return ret;
+
+ if (!cmp_v4l2_format(fmt, &format)) {
+ /* Sensor format is different from the requested format
+ * so we need to change it
+ */
+ ret = __vpfe_set_format(vpfe, fmt, &bpp);
+ if (ret)
+ return ret;
+ } else /* Just make sure all of the fields are consistent */
+ *fmt = format;
+
+ /* First detach any IRQ if currently attached */
+ vpfe_detach_irq(vpfe);
+ vpfe->fmt = *fmt;
+ vpfe->bpp = bpp;
+
+ /* Update the crop window based on found values */
+ vpfe->crop.width = fmt->fmt.pix.width;
+ vpfe->crop.height = fmt->fmt.pix.height;
+
+ /* set image capture parameters in the ccdc */
+ return vpfe_config_ccdc_image_format(vpfe);
+}
+
+static int vpfe_enum_size(struct file *file, void *priv,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+ struct v4l2_subdev_frame_size_enum fse;
+ struct vpfe_subdev_info *sdinfo;
+ struct v4l2_mbus_framefmt mbus;
+ struct v4l2_pix_format pix;
+ struct vpfe_fmt *fmt;
+ int ret;
+
+ vpfe_dbg(2, vpfe, "vpfe_enum_size\n");
+
+ /* check for valid format */
+ fmt = find_format_by_pix(fsize->pixel_format);
+ if (!fmt) {
+ vpfe_dbg(3, vpfe, "Invalid pixel code: %x, default used instead\n",
+ fsize->pixel_format);
+ return -EINVAL;
+ }
+
+ memset(fsize->reserved, 0x0, sizeof(fsize->reserved));
+
+ sdinfo = vpfe->current_subdev;
+ if (!sdinfo->sd)
+ return -EINVAL;
+
+ memset(&pix, 0x0, sizeof(pix));
+ /* Construct pix from parameter and use default for the rest */
+ pix.pixelformat = fsize->pixel_format;
+ pix.width = 640;
+ pix.height = 480;
+ pix.colorspace = V4L2_COLORSPACE_SRGB;
+ pix.field = V4L2_FIELD_NONE;
+ pix_to_mbus(vpfe, &pix, &mbus);
+
+ memset(&fse, 0x0, sizeof(fse));
+ fse.index = fsize->index;
+ fse.pad = 0;
+ fse.code = mbus.code;
+ ret = v4l2_subdev_call(sdinfo->sd, pad, enum_frame_size, NULL, &fse);
+ if (ret)
+ return -EINVAL;
+
+ vpfe_dbg(1, vpfe, "vpfe_enum_size: index: %d code: %x W:[%d,%d] H:[%d,%d]\n",
+ fse.index, fse.code, fse.min_width, fse.max_width,
+ fse.min_height, fse.max_height);
+
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fsize->discrete.width = fse.max_width;
+ fsize->discrete.height = fse.max_height;
+
+ vpfe_dbg(1, vpfe, "vpfe_enum_size: index: %d pixformat: %s size: %dx%d\n",
+ fsize->index, print_fourcc(fsize->pixel_format),
+ fsize->discrete.width, fsize->discrete.height);
+
+ return 0;
+}
+
+/*
+ * vpfe_get_subdev_input_index - Get subdev index and subdev input index for a
+ * given app input index
+ */
+static int
+vpfe_get_subdev_input_index(struct vpfe_device *vpfe,
+ int *subdev_index,
+ int *subdev_input_index,
+ int app_input_index)
+{
+ struct vpfe_config *cfg = vpfe->cfg;
+ struct vpfe_subdev_info *sdinfo;
+ int i, j = 0;
+
+ for (i = 0; i < ARRAY_SIZE(vpfe->cfg->asd); i++) {
+ sdinfo = &cfg->sub_devs[i];
+ if (app_input_index < (j + 1)) {
+ *subdev_index = i;
+ *subdev_input_index = app_input_index - j;
+ return 0;
+ }
+ j++;
+ }
+ return -EINVAL;
+}
+
+/*
+ * vpfe_get_app_input - Get app input index for a given subdev input index
+ * The driver stores the input index of the current sub device and translates
+ * it when the application requests the current input
+ */
+static int vpfe_get_app_input_index(struct vpfe_device *vpfe,
+ int *app_input_index)
+{
+ struct vpfe_config *cfg = vpfe->cfg;
+ struct vpfe_subdev_info *sdinfo;
+ int i, j = 0;
+
+ for (i = 0; i < ARRAY_SIZE(vpfe->cfg->asd); i++) {
+ sdinfo = &cfg->sub_devs[i];
+ if (!strcmp(sdinfo->name, vpfe->current_subdev->name)) {
+ if (vpfe->current_input >= 1)
+ return -1;
+ *app_input_index = j + vpfe->current_input;
+ return 0;
+ }
+ j++;
+ }
+ return -EINVAL;
+}
+
+static int vpfe_enum_input(struct file *file, void *priv,
+ struct v4l2_input *inp)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+ struct vpfe_subdev_info *sdinfo;
+ int subdev, index;
+
+ vpfe_dbg(2, vpfe, "vpfe_enum_input\n");
+
+ if (vpfe_get_subdev_input_index(vpfe, &subdev, &index,
+ inp->index) < 0) {
+ vpfe_dbg(1, vpfe,
+ "input information not found for the subdev\n");
+ return -EINVAL;
+ }
+ sdinfo = &vpfe->cfg->sub_devs[subdev];
+ *inp = sdinfo->inputs[index];
+
+ return 0;
+}
+
+static int vpfe_g_input(struct file *file, void *priv, unsigned int *index)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+
+ vpfe_dbg(2, vpfe, "vpfe_g_input\n");
+
+ return vpfe_get_app_input_index(vpfe, index);
+}
+
+/* Assumes caller is holding vpfe_dev->lock */
+static int vpfe_set_input(struct vpfe_device *vpfe, unsigned int index)
+{
+ int subdev_index = 0, inp_index = 0;
+ struct vpfe_subdev_info *sdinfo;
+ struct vpfe_route *route;
+ u32 input, output;
+ int ret;
+
+ vpfe_dbg(2, vpfe, "vpfe_set_input: index: %d\n", index);
+
+ /* If streaming is started, return error */
+ if (vb2_is_busy(&vpfe->buffer_queue)) {
+ vpfe_err(vpfe, "%s device busy\n", __func__);
+ return -EBUSY;
+ }
+ ret = vpfe_get_subdev_input_index(vpfe,
+ &subdev_index,
+ &inp_index,
+ index);
+ if (ret < 0) {
+ vpfe_err(vpfe, "invalid input index: %d\n", index);
+ goto get_out;
+ }
+
+ sdinfo = &vpfe->cfg->sub_devs[subdev_index];
+ sdinfo->sd = vpfe->sd[subdev_index];
+ route = &sdinfo->routes[inp_index];
+ if (route && sdinfo->can_route) {
+ input = route->input;
+ output = route->output;
+ if (sdinfo->sd) {
+ ret = v4l2_subdev_call(sdinfo->sd, video,
+ s_routing, input, output, 0);
+ if (ret) {
+ vpfe_err(vpfe, "s_routing failed\n");
+ ret = -EINVAL;
+ goto get_out;
+ }
+ }
+
+ }
+
+ vpfe->current_subdev = sdinfo;
+ if (sdinfo->sd)
+ vpfe->v4l2_dev.ctrl_handler = sdinfo->sd->ctrl_handler;
+ vpfe->current_input = index;
+ vpfe->std_index = 0;
+
+ /* set the bus/interface parameter for the sub device in ccdc */
+ ret = vpfe_ccdc_set_hw_if_params(&vpfe->ccdc, &sdinfo->vpfe_param);
+ if (ret)
+ return ret;
+
+ /* set the default image parameters in the device */
+ return vpfe_config_image_format(vpfe,
+ vpfe_standards[vpfe->std_index].std_id);
+
+get_out:
+ return ret;
+}
+
+static int vpfe_s_input(struct file *file, void *priv, unsigned int index)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+
+ vpfe_dbg(2, vpfe,
+ "vpfe_s_input: index: %d\n", index);
+
+ return vpfe_set_input(vpfe, index);
+}
+
+static int vpfe_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+ struct vpfe_subdev_info *sdinfo;
+
+ vpfe_dbg(2, vpfe, "vpfe_querystd\n");
+
+ sdinfo = vpfe->current_subdev;
+ if (!(sdinfo->inputs[0].capabilities & V4L2_IN_CAP_STD))
+ return -ENODATA;
+
+ /* Call querystd function of decoder device */
+ return v4l2_device_call_until_err(&vpfe->v4l2_dev, sdinfo->grp_id,
+ video, querystd, std_id);
+}
+
+static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id std_id)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+ struct vpfe_subdev_info *sdinfo;
+ int ret;
+
+ vpfe_dbg(2, vpfe, "vpfe_s_std\n");
+
+ sdinfo = vpfe->current_subdev;
+ if (!(sdinfo->inputs[0].capabilities & V4L2_IN_CAP_STD))
+ return -ENODATA;
+
+ /* If streaming is started, return error */
+ if (vb2_is_busy(&vpfe->buffer_queue)) {
+ vpfe_err(vpfe, "%s device busy\n", __func__);
+ ret = -EBUSY;
+ return ret;
+ }
+
+ ret = v4l2_device_call_until_err(&vpfe->v4l2_dev, sdinfo->grp_id,
+ video, s_std, std_id);
+ if (ret < 0) {
+ vpfe_err(vpfe, "Failed to set standard\n");
+ return ret;
+ }
+ ret = vpfe_config_image_format(vpfe, std_id);
+
+ return ret;
+}
+
+static int vpfe_g_std(struct file *file, void *priv, v4l2_std_id *std_id)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+ struct vpfe_subdev_info *sdinfo;
+
+ vpfe_dbg(2, vpfe, "vpfe_g_std\n");
+
+ sdinfo = vpfe->current_subdev;
+ if (sdinfo->inputs[0].capabilities != V4L2_IN_CAP_STD)
+ return -ENODATA;
+
+ *std_id = vpfe_standards[vpfe->std_index].std_id;
+
+ return 0;
+}
+
+/*
+ * vpfe_calculate_offsets : This function calculates the buffer offsets
+ * for the top and bottom fields
+ */
+static void vpfe_calculate_offsets(struct vpfe_device *vpfe)
+{
+ struct v4l2_rect image_win;
+
+ vpfe_dbg(2, vpfe, "vpfe_calculate_offsets\n");
+
+ vpfe_ccdc_get_image_window(&vpfe->ccdc, &image_win);
+ vpfe->field_off = image_win.height * image_win.width;
+}
+
+/*
+ * vpfe_queue_setup - Callback function for buffer setup.
+ * @vq: vb2_queue ptr
+ * @fmt: v4l2 format
+ * @nbuffers: ptr to number of buffers requested by application
+ * @nplanes: contains the number of distinct video planes needed to hold a frame
+ * @sizes[]: contains the size (in bytes) of each plane.
+ * @alloc_ctxs: ptr to allocation context
+ *
+ * This callback function is called when VIDIOC_REQBUFS is invoked to adjust
+ * the buffer count and buffer size
+ */
+static int vpfe_queue_setup(struct vb2_queue *vq,
+ const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct vpfe_device *vpfe = vb2_get_drv_priv(vq);
+
+ if (fmt && fmt->fmt.pix.sizeimage < vpfe->fmt.fmt.pix.sizeimage)
+ return -EINVAL;
+
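+	/* Ensure that at least three buffers are allocated in total */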
+ if (vq->num_buffers + *nbuffers < 3)
+ *nbuffers = 3 - vq->num_buffers;
+
+ *nplanes = 1;
+ sizes[0] = fmt ? fmt->fmt.pix.sizeimage : vpfe->fmt.fmt.pix.sizeimage;
+ alloc_ctxs[0] = vpfe->alloc_ctx;
+
+ vpfe_dbg(1, vpfe,
+ "nbuffers=%d, size=%u\n", *nbuffers, sizes[0]);
+
+ /* Calculate field offset */
+ vpfe_calculate_offsets(vpfe);
+
+ return 0;
+}
+
+/*
+ * vpfe_buffer_prepare : callback function for buffer prepare
+ * @vb: ptr to vb2_buffer
+ *
+ * This is the callback function for buffer prepare; it is called when
+ * vb2_qbuf() is invoked. The plane payload is set and validated against
+ * the plane size, and the buffer field is initialized.
+ */
+static int vpfe_buffer_prepare(struct vb2_buffer *vb)
+{
+ struct vpfe_device *vpfe = vb2_get_drv_priv(vb->vb2_queue);
+
+ vb2_set_plane_payload(vb, 0, vpfe->fmt.fmt.pix.sizeimage);
+
+ if (vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
+ return -EINVAL;
+
+ vb->v4l2_buf.field = vpfe->fmt.fmt.pix.field;
+
+ return 0;
+}
+
+/*
+ * vpfe_buffer_queue : Callback function to add buffer to DMA queue
+ * @vb: ptr to vb2_buffer
+ */
+static void vpfe_buffer_queue(struct vb2_buffer *vb)
+{
+ struct vpfe_device *vpfe = vb2_get_drv_priv(vb->vb2_queue);
+ struct vpfe_cap_buffer *buf = to_vpfe_buffer(vb);
+ unsigned long flags = 0;
+
+ /* add the buffer to the DMA queue */
+ spin_lock_irqsave(&vpfe->dma_queue_lock, flags);
+ list_add_tail(&buf->list, &vpfe->dma_queue);
+ spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);
+}
+
+/*
+ * vpfe_start_streaming : Starts the DMA engine for streaming
+ * @vq: ptr to vb2_queue
+ * @count: number of buffers
+ */
+static int vpfe_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct vpfe_device *vpfe = vb2_get_drv_priv(vq);
+ struct vpfe_cap_buffer *buf, *tmp;
+ struct vpfe_subdev_info *sdinfo;
+ unsigned long flags;
+ unsigned long addr;
+ int ret;
+
+ spin_lock_irqsave(&vpfe->dma_queue_lock, flags);
+
+ vpfe->field = 0;
+ vpfe->sequence = 0;
+
+ sdinfo = vpfe->current_subdev;
+
+ vpfe_attach_irq(vpfe);
+
+ if (vpfe->ccdc.ccdc_cfg.if_type == VPFE_RAW_BAYER)
+ vpfe_ccdc_config_raw(&vpfe->ccdc);
+ else
+ vpfe_ccdc_config_ycbcr(&vpfe->ccdc);
+
+ /* Get the next frame from the buffer queue */
+ vpfe->next_frm = list_entry(vpfe->dma_queue.next,
+ struct vpfe_cap_buffer, list);
+ vpfe->cur_frm = vpfe->next_frm;
+ /* Remove buffer from the buffer queue */
+ list_del(&vpfe->cur_frm->list);
+ spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);
+
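+	/* Point the CCDC at the first buffer and enable capture */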
+ addr = vb2_dma_contig_plane_dma_addr(&vpfe->cur_frm->vb, 0);
+
+ vpfe_set_sdr_addr(&vpfe->ccdc, (unsigned long)(addr));
+
+ vpfe_pcr_enable(&vpfe->ccdc, 1);
+
+ ret = v4l2_subdev_call(sdinfo->sd, video, s_stream, 1);
+ if (ret < 0) {
+ vpfe_err(vpfe, "Error in attaching interrupt handle\n");
+ goto err;
+ }
+
+ return 0;
+
+err:
+ list_for_each_entry_safe(buf, tmp, &vpfe->dma_queue, list) {
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
+ }
+
+ return ret;
+}
+
+/*
+ * vpfe_stop_streaming : Stop the DMA engine
+ * @vq: ptr to vb2_queue
+ *
+ * This callback stops the DMA engine and releases any buffers
+ * remaining in the DMA queue.
+ */
+static void vpfe_stop_streaming(struct vb2_queue *vq)
+{
+ struct vpfe_device *vpfe = vb2_get_drv_priv(vq);
+ struct vpfe_subdev_info *sdinfo;
+ unsigned long flags;
+ int ret;
+
+ vpfe_pcr_enable(&vpfe->ccdc, 0);
+
+ vpfe_detach_irq(vpfe);
+
+ sdinfo = vpfe->current_subdev;
+ ret = v4l2_subdev_call(sdinfo->sd, video, s_stream, 0);
+ if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV)
+ vpfe_dbg(1, vpfe, "stream off failed in subdev\n");
+
+ /* release all active buffers */
+ spin_lock_irqsave(&vpfe->dma_queue_lock, flags);
+ if (vpfe->cur_frm == vpfe->next_frm) {
+ vb2_buffer_done(&vpfe->cur_frm->vb, VB2_BUF_STATE_ERROR);
+ } else {
+ if (vpfe->cur_frm != NULL)
+ vb2_buffer_done(&vpfe->cur_frm->vb,
+ VB2_BUF_STATE_ERROR);
+ if (vpfe->next_frm != NULL)
+ vb2_buffer_done(&vpfe->next_frm->vb,
+ VB2_BUF_STATE_ERROR);
+ }
+
+ while (!list_empty(&vpfe->dma_queue)) {
+ vpfe->next_frm = list_entry(vpfe->dma_queue.next,
+ struct vpfe_cap_buffer, list);
+ list_del(&vpfe->next_frm->list);
+ vb2_buffer_done(&vpfe->next_frm->vb, VB2_BUF_STATE_ERROR);
+ }
+ spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);
+}
+
+static int vpfe_cropcap(struct file *file, void *priv,
+ struct v4l2_cropcap *crop)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+
+ vpfe_dbg(2, vpfe, "vpfe_cropcap\n");
+
+ if (vpfe->std_index >= ARRAY_SIZE(vpfe_standards))
+ return -EINVAL;
+
+ memset(crop, 0, sizeof(struct v4l2_cropcap));
+
+ crop->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ crop->defrect.width = vpfe_standards[vpfe->std_index].width;
+ crop->bounds.width = crop->defrect.width;
+ crop->defrect.height = vpfe_standards[vpfe->std_index].height;
+ crop->bounds.height = crop->defrect.height;
+ crop->pixelaspect = vpfe_standards[vpfe->std_index].pixelaspect;
+
+ return 0;
+}
+
+static int
+vpfe_g_selection(struct file *file, void *fh, struct v4l2_selection *s)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ s->r.left = s->r.top = 0;
+ s->r.width = vpfe->crop.width;
+ s->r.height = vpfe->crop.height;
+ break;
+
+ case V4L2_SEL_TGT_CROP:
+ s->r = vpfe->crop;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int enclosed_rectangle(struct v4l2_rect *a, struct v4l2_rect *b)
+{
+ if (a->left < b->left || a->top < b->top)
+ return 0;
+
+ if (a->left + a->width > b->left + b->width)
+ return 0;
+
+ if (a->top + a->height > b->top + b->height)
+ return 0;
+
+ return 1;
+}
+
+static int
+vpfe_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+ struct v4l2_rect cr = vpfe->crop;
+ struct v4l2_rect r = s->r;
+
+ /* If streaming is started, return error */
+ if (vb2_is_busy(&vpfe->buffer_queue)) {
+ vpfe_err(vpfe, "%s device busy\n", __func__);
+ return -EBUSY;
+ }
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ s->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
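+	/* Bound and align the requested rectangle to the current crop window */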
+ v4l_bound_align_image(&r.width, 0, cr.width, 0,
+ &r.height, 0, cr.height, 0, 0);
+
+ r.left = clamp_t(unsigned int, r.left, 0, cr.width - r.width);
+ r.top = clamp_t(unsigned int, r.top, 0, cr.height - r.height);
+
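+	/*
+	 * Honour the selection flags: with LE the result must fit inside
+	 * the requested rectangle, with GE it must cover it.
+	 */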
+ if (s->flags & V4L2_SEL_FLAG_LE && !enclosed_rectangle(&r, &s->r))
+ return -ERANGE;
+
+ if (s->flags & V4L2_SEL_FLAG_GE && !enclosed_rectangle(&s->r, &r))
+ return -ERANGE;
+
+ s->r = vpfe->crop = r;
+
+ vpfe_ccdc_set_image_window(&vpfe->ccdc, &r, vpfe->bpp);
+ vpfe->fmt.fmt.pix.width = r.width;
+ vpfe->fmt.fmt.pix.height = r.height;
+ vpfe->fmt.fmt.pix.bytesperline = vpfe_ccdc_get_line_length(&vpfe->ccdc);
+ vpfe->fmt.fmt.pix.sizeimage = vpfe->fmt.fmt.pix.bytesperline *
+ vpfe->fmt.fmt.pix.height;
+
+ vpfe_dbg(1, vpfe, "cropped (%d,%d)/%dx%d of %dx%d\n",
+ r.left, r.top, r.width, r.height, cr.width, cr.height);
+
+ return 0;
+}
+
+static long vpfe_ioctl_default(struct file *file, void *priv,
+ bool valid_prio, unsigned int cmd, void *param)
+{
+ struct vpfe_device *vpfe = video_drvdata(file);
+ int ret;
+
+ vpfe_dbg(2, vpfe, "vpfe_ioctl_default\n");
+
+ if (!valid_prio) {
+ vpfe_err(vpfe, "%s device busy\n", __func__);
+ return -EBUSY;
+ }
+
+ /* If streaming is started, return error */
+ if (vb2_is_busy(&vpfe->buffer_queue)) {
+ vpfe_err(vpfe, "%s device busy\n", __func__);
+ return -EBUSY;
+ }
+
+ switch (cmd) {
+ case VIDIOC_AM437X_CCDC_CFG:
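+		/*
+		 * Apply the user-supplied raw-capture CCDC parameters and
+		 * refresh the active image format accordingly.
+		 */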
+ ret = vpfe_ccdc_set_params(&vpfe->ccdc, (void __user *)param);
+ if (ret) {
+ vpfe_dbg(2, vpfe,
+ "Error setting parameters in CCDC\n");
+ return ret;
+ }
+ ret = vpfe_get_ccdc_image_format(vpfe,
+ &vpfe->fmt);
+ if (ret < 0) {
+ vpfe_dbg(2, vpfe,
+ "Invalid image format at CCDC\n");
+ return ret;
+ }
+ break;
+
+ default:
+ ret = -ENOTTY;
+ break;
+ }
+
+ return ret;
+}
+
+static const struct vb2_ops vpfe_video_qops = {
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .queue_setup = vpfe_queue_setup,
+ .buf_prepare = vpfe_buffer_prepare,
+ .buf_queue = vpfe_buffer_queue,
+ .start_streaming = vpfe_start_streaming,
+ .stop_streaming = vpfe_stop_streaming,
+};
+
+/* vpfe capture driver file operations */
+static const struct v4l2_file_operations vpfe_fops = {
+ .owner = THIS_MODULE,
+ .open = vpfe_open,
+ .release = vpfe_release,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+};
+
+/* vpfe capture ioctl operations */
+static const struct v4l2_ioctl_ops vpfe_ioctl_ops = {
+ .vidioc_querycap = vpfe_querycap,
+ .vidioc_enum_fmt_vid_cap = vpfe_enum_fmt,
+ .vidioc_g_fmt_vid_cap = vpfe_g_fmt,
+ .vidioc_s_fmt_vid_cap = vpfe_s_fmt,
+ .vidioc_try_fmt_vid_cap = vpfe_try_fmt,
+
+ .vidioc_enum_framesizes = vpfe_enum_size,
+
+ .vidioc_enum_input = vpfe_enum_input,
+ .vidioc_g_input = vpfe_g_input,
+ .vidioc_s_input = vpfe_s_input,
+
+ .vidioc_querystd = vpfe_querystd,
+ .vidioc_s_std = vpfe_s_std,
+ .vidioc_g_std = vpfe_g_std,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+
+ .vidioc_cropcap = vpfe_cropcap,
+ .vidioc_g_selection = vpfe_g_selection,
+ .vidioc_s_selection = vpfe_s_selection,
+
+ .vidioc_default = vpfe_ioctl_default,
+};
+
+static int
+vpfe_async_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct vpfe_device *vpfe = container_of(notifier->v4l2_dev,
+ struct vpfe_device, v4l2_dev);
+ struct v4l2_subdev_mbus_code_enum mbus_code;
+ struct vpfe_subdev_info *sdinfo;
+ bool found = false;
+ int i, j;
+
+ vpfe_dbg(1, vpfe, "vpfe_async_bound\n");
+
+ for (i = 0; i < ARRAY_SIZE(vpfe->cfg->asd); i++) {
+ sdinfo = &vpfe->cfg->sub_devs[i];
+
+ if (!strcmp(sdinfo->name, subdev->name)) {
+ vpfe->sd[i] = subdev;
+ vpfe_info(vpfe,
+ "v4l2 sub device %s registered\n",
+ subdev->name);
+ vpfe->sd[i]->grp_id =
+ sdinfo->grp_id;
+ /* update tvnorms from the sub devices */
+ for (j = 0; j < 1; j++)
+ vpfe->video_dev->tvnorms |=
+ sdinfo->inputs[j].std;
+
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ vpfe_info(vpfe, "sub device (%s) not matched\n", subdev->name);
+ return -EINVAL;
+ }
+
+ /* setup the supported formats & indexes */
+ for (j = 0, i = 0; ; ++j) {
+ struct vpfe_fmt *fmt;
+ int ret;
+
+ memset(&mbus_code, 0, sizeof(mbus_code));
+ mbus_code.index = j;
+ ret = v4l2_subdev_call(subdev, pad, enum_mbus_code,
+ NULL, &mbus_code);
+ if (ret)
+ break;
+
+ fmt = find_format_by_code(mbus_code.code);
+ if (!fmt)
+ continue;
+
+ fmt->supported = true;
+ fmt->index = i++;
+ }
+
+ return 0;
+}
+
+static int vpfe_probe_complete(struct vpfe_device *vpfe)
+{
+ struct video_device *vdev;
+ struct vb2_queue *q;
+ int err;
+
+ spin_lock_init(&vpfe->dma_queue_lock);
+ mutex_init(&vpfe->lock);
+
+ vpfe->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+
+ /* set first sub device as current one */
+ vpfe->current_subdev = &vpfe->cfg->sub_devs[0];
+ vpfe->v4l2_dev.ctrl_handler = vpfe->sd[0]->ctrl_handler;
+
+ err = vpfe_set_input(vpfe, 0);
+ if (err)
+ goto probe_out;
+
+ /* Initialize videobuf2 queue as per the buffer type */
+ vpfe->alloc_ctx = vb2_dma_contig_init_ctx(vpfe->pdev);
+ if (IS_ERR(vpfe->alloc_ctx)) {
+ vpfe_err(vpfe, "Failed to get the context\n");
+ err = PTR_ERR(vpfe->alloc_ctx);
+ goto probe_out;
+ }
+
+ q = &vpfe->buffer_queue;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
+ q->drv_priv = vpfe;
+ q->ops = &vpfe_video_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->buf_struct_size = sizeof(struct vpfe_cap_buffer);
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &vpfe->lock;
+ q->min_buffers_needed = 1;
+
+ err = vb2_queue_init(q);
+ if (err) {
+ vpfe_err(vpfe, "vb2_queue_init() failed\n");
+ vb2_dma_contig_cleanup_ctx(vpfe->alloc_ctx);
+ goto probe_out;
+ }
+
+ INIT_LIST_HEAD(&vpfe->dma_queue);
+
+ vdev = vpfe->video_dev;
+ strlcpy(vdev->name, VPFE_MODULE_NAME, sizeof(vdev->name));
+ vdev->release = video_device_release;
+ vdev->fops = &vpfe_fops;
+ vdev->ioctl_ops = &vpfe_ioctl_ops;
+ vdev->v4l2_dev = &vpfe->v4l2_dev;
+ vdev->vfl_dir = VFL_DIR_RX;
+ vdev->queue = q;
+ vdev->lock = &vpfe->lock;
+ video_set_drvdata(vdev, vpfe);
+ err = video_register_device(vpfe->video_dev, VFL_TYPE_GRABBER, -1);
+ if (err) {
+ vpfe_err(vpfe,
+ "Unable to register video device.\n");
+ goto probe_out;
+ }
+
+ return 0;
+
+probe_out:
+ v4l2_device_unregister(&vpfe->v4l2_dev);
+ return err;
+}
+
+static int vpfe_async_complete(struct v4l2_async_notifier *notifier)
+{
+ struct vpfe_device *vpfe = container_of(notifier->v4l2_dev,
+ struct vpfe_device, v4l2_dev);
+
+ return vpfe_probe_complete(vpfe);
+}
+
+static struct vpfe_config *
+vpfe_get_pdata(struct platform_device *pdev)
+{
+ struct device_node *endpoint = NULL, *rem = NULL;
+ struct v4l2_of_endpoint bus_cfg;
+ struct vpfe_subdev_info *sdinfo;
+ struct vpfe_config *pdata;
+ unsigned int flags;
+ unsigned int i;
+ int err;
+
+ dev_dbg(&pdev->dev, "vpfe_get_pdata\n");
+
+ if (!IS_ENABLED(CONFIG_OF) || !pdev->dev.of_node)
+ return pdev->dev.platform_data;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ for (i = 0; ; i++) {
+ endpoint = of_graph_get_next_endpoint(pdev->dev.of_node,
+ endpoint);
+ if (!endpoint)
+ break;
+
+ sdinfo = &pdata->sub_devs[i];
+ sdinfo->grp_id = 0;
+
+ /* we only support camera */
+ sdinfo->inputs[0].index = i;
+ strcpy(sdinfo->inputs[0].name, "Camera");
+ sdinfo->inputs[0].type = V4L2_INPUT_TYPE_CAMERA;
+ sdinfo->inputs[0].std = V4L2_STD_ALL;
+ sdinfo->inputs[0].capabilities = V4L2_IN_CAP_STD;
+
+ sdinfo->can_route = 0;
+ sdinfo->routes = NULL;
+
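+		/* Interface type from DT; fall back to raw Bayer when absent or invalid */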
+ of_property_read_u32(endpoint, "ti,am437x-vpfe-interface",
+ &sdinfo->vpfe_param.if_type);
+ if (sdinfo->vpfe_param.if_type < 0 ||
+ sdinfo->vpfe_param.if_type > 4) {
+ sdinfo->vpfe_param.if_type = VPFE_RAW_BAYER;
+ }
+
+ err = v4l2_of_parse_endpoint(endpoint, &bus_cfg);
+ if (err) {
+ dev_err(&pdev->dev, "Could not parse the endpoint\n");
+ goto done;
+ }
+
+ sdinfo->vpfe_param.bus_width = bus_cfg.bus.parallel.bus_width;
+
+ if (sdinfo->vpfe_param.bus_width < 8 ||
+ sdinfo->vpfe_param.bus_width > 16) {
+ dev_err(&pdev->dev, "Invalid bus width.\n");
+ goto done;
+ }
+
+ flags = bus_cfg.bus.parallel.flags;
+
+ if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
+ sdinfo->vpfe_param.hdpol = 1;
+
+ if (flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
+ sdinfo->vpfe_param.vdpol = 1;
+
+ rem = of_graph_get_remote_port_parent(endpoint);
+ if (!rem) {
+ dev_err(&pdev->dev, "Remote device at %s not found\n",
+ endpoint->full_name);
+ goto done;
+ }
+
+ strncpy(sdinfo->name, rem->name, sizeof(sdinfo->name));
+
+		pdata->asd[i] = devm_kzalloc(&pdev->dev,
+					     sizeof(struct v4l2_async_subdev),
+					     GFP_KERNEL);
+		if (!pdata->asd[i])
+			goto done;
+		pdata->asd[i]->match_type = V4L2_ASYNC_MATCH_OF;
+		pdata->asd[i]->match.of.node = rem;
+ of_node_put(endpoint);
+ of_node_put(rem);
+ }
+
+ of_node_put(endpoint);
+ return pdata;
+
+done:
+ of_node_put(endpoint);
+ of_node_put(rem);
+ return NULL;
+}
+
+/*
+ * vpfe_probe : This function registers the device with the V4L2
+ * framework and initializes the fields of each device object
+ */
+static int vpfe_probe(struct platform_device *pdev)
+{
+ struct vpfe_config *vpfe_cfg = vpfe_get_pdata(pdev);
+ struct vpfe_device *vpfe;
+ struct vpfe_ccdc *ccdc;
+ struct resource *res;
+ int ret;
+
+ if (!vpfe_cfg) {
+ dev_err(&pdev->dev, "No platform data\n");
+ return -EINVAL;
+ }
+
+ vpfe = devm_kzalloc(&pdev->dev, sizeof(*vpfe), GFP_KERNEL);
+ if (!vpfe)
+ return -ENOMEM;
+
+ vpfe->pdev = &pdev->dev;
+ vpfe->cfg = vpfe_cfg;
+ ccdc = &vpfe->ccdc;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ccdc->ccdc_cfg.base_addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ccdc->ccdc_cfg.base_addr))
+ return PTR_ERR(ccdc->ccdc_cfg.base_addr);
+
+ vpfe->irq = platform_get_irq(pdev, 0);
+ if (vpfe->irq <= 0) {
+ dev_err(&pdev->dev, "No IRQ resource\n");
+ return -ENODEV;
+ }
+
+ ret = devm_request_irq(vpfe->pdev, vpfe->irq, vpfe_isr, 0,
+ "vpfe_capture0", vpfe);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to request interrupt\n");
+ return -EINVAL;
+ }
+
+ vpfe->video_dev = video_device_alloc();
+ if (!vpfe->video_dev) {
+ dev_err(&pdev->dev, "Unable to allocate video device\n");
+ return -ENOMEM;
+ }
+
+ ret = v4l2_device_register(&pdev->dev, &vpfe->v4l2_dev);
+ if (ret) {
+ vpfe_err(vpfe,
+ "Unable to register v4l2 device.\n");
+ goto probe_out_video_release;
+ }
+
+ /* set the driver data in platform device */
+ platform_set_drvdata(pdev, vpfe);
+ /* Enabling module functional clock */
+ pm_runtime_enable(&pdev->dev);
+
+ /* for now just enable it here instead of waiting for the open */
+ pm_runtime_get_sync(&pdev->dev);
+
+ vpfe_ccdc_config_defaults(ccdc);
+
+ pm_runtime_put_sync(&pdev->dev);
+
+ vpfe->sd = devm_kzalloc(&pdev->dev, sizeof(struct v4l2_subdev *) *
+ ARRAY_SIZE(vpfe->cfg->asd), GFP_KERNEL);
+ if (!vpfe->sd) {
+ ret = -ENOMEM;
+ goto probe_out_v4l2_unregister;
+ }
+
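+	/*
+	 * Register an async notifier so that probing completes once all
+	 * sub devices have been bound.
+	 */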
+ vpfe->notifier.subdevs = vpfe->cfg->asd;
+ vpfe->notifier.num_subdevs = ARRAY_SIZE(vpfe->cfg->asd);
+ vpfe->notifier.bound = vpfe_async_bound;
+ vpfe->notifier.complete = vpfe_async_complete;
+ ret = v4l2_async_notifier_register(&vpfe->v4l2_dev,
+ &vpfe->notifier);
+ if (ret) {
+ vpfe_err(vpfe, "Error registering async notifier\n");
+ ret = -EINVAL;
+ goto probe_out_v4l2_unregister;
+ }
+
+ return 0;
+
+probe_out_v4l2_unregister:
+ v4l2_device_unregister(&vpfe->v4l2_dev);
+probe_out_video_release:
+ if (!video_is_registered(vpfe->video_dev))
+ video_device_release(vpfe->video_dev);
+ return ret;
+}
+
+/*
+ * vpfe_remove : Unregister the device from the V4L2 framework
+ */
+static int vpfe_remove(struct platform_device *pdev)
+{
+ struct vpfe_device *vpfe = platform_get_drvdata(pdev);
+
+ vpfe_dbg(2, vpfe, "vpfe_remove\n");
+
+ pm_runtime_disable(&pdev->dev);
+
+ v4l2_async_notifier_unregister(&vpfe->notifier);
+ v4l2_device_unregister(&vpfe->v4l2_dev);
+ video_unregister_device(vpfe->video_dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static void vpfe_save_context(struct vpfe_ccdc *ccdc)
+{
+ ccdc->ccdc_ctx[VPFE_PCR >> 2] = vpfe_reg_read(ccdc, VPFE_PCR);
+ ccdc->ccdc_ctx[VPFE_SYNMODE >> 2] = vpfe_reg_read(ccdc, VPFE_SYNMODE);
+ ccdc->ccdc_ctx[VPFE_SDOFST >> 2] = vpfe_reg_read(ccdc, VPFE_SDOFST);
+ ccdc->ccdc_ctx[VPFE_SDR_ADDR >> 2] = vpfe_reg_read(ccdc, VPFE_SDR_ADDR);
+ ccdc->ccdc_ctx[VPFE_CLAMP >> 2] = vpfe_reg_read(ccdc, VPFE_CLAMP);
+ ccdc->ccdc_ctx[VPFE_DCSUB >> 2] = vpfe_reg_read(ccdc, VPFE_DCSUB);
+ ccdc->ccdc_ctx[VPFE_COLPTN >> 2] = vpfe_reg_read(ccdc, VPFE_COLPTN);
+ ccdc->ccdc_ctx[VPFE_BLKCMP >> 2] = vpfe_reg_read(ccdc, VPFE_BLKCMP);
+ ccdc->ccdc_ctx[VPFE_VDINT >> 2] = vpfe_reg_read(ccdc, VPFE_VDINT);
+ ccdc->ccdc_ctx[VPFE_ALAW >> 2] = vpfe_reg_read(ccdc, VPFE_ALAW);
+ ccdc->ccdc_ctx[VPFE_REC656IF >> 2] = vpfe_reg_read(ccdc, VPFE_REC656IF);
+ ccdc->ccdc_ctx[VPFE_CCDCFG >> 2] = vpfe_reg_read(ccdc, VPFE_CCDCFG);
+ ccdc->ccdc_ctx[VPFE_CULLING >> 2] = vpfe_reg_read(ccdc, VPFE_CULLING);
+ ccdc->ccdc_ctx[VPFE_HD_VD_WID >> 2] = vpfe_reg_read(ccdc,
+ VPFE_HD_VD_WID);
+ ccdc->ccdc_ctx[VPFE_PIX_LINES >> 2] = vpfe_reg_read(ccdc,
+ VPFE_PIX_LINES);
+ ccdc->ccdc_ctx[VPFE_HORZ_INFO >> 2] = vpfe_reg_read(ccdc,
+ VPFE_HORZ_INFO);
+ ccdc->ccdc_ctx[VPFE_VERT_START >> 2] = vpfe_reg_read(ccdc,
+ VPFE_VERT_START);
+ ccdc->ccdc_ctx[VPFE_VERT_LINES >> 2] = vpfe_reg_read(ccdc,
+ VPFE_VERT_LINES);
+ ccdc->ccdc_ctx[VPFE_HSIZE_OFF >> 2] = vpfe_reg_read(ccdc,
+ VPFE_HSIZE_OFF);
+}
+
+static int vpfe_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct vpfe_device *vpfe = platform_get_drvdata(pdev);
+ struct vpfe_ccdc *ccdc = &vpfe->ccdc;
+
+ /* if streaming has not started we don't care */
+ if (!vb2_start_streaming_called(&vpfe->buffer_queue))
+ return 0;
+
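+	/* Power the module up briefly so its register context can be saved */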
+ pm_runtime_get_sync(dev);
+ vpfe_config_enable(ccdc, 1);
+
+ /* Save VPFE context */
+ vpfe_save_context(ccdc);
+
+ /* Disable CCDC */
+ vpfe_pcr_enable(ccdc, 0);
+ vpfe_config_enable(ccdc, 0);
+
+ /* Disable both master and slave clock */
+ pm_runtime_put_sync(dev);
+
+ /* Select sleep pin state */
+ pinctrl_pm_select_sleep_state(dev);
+
+ return 0;
+}
+
+static void vpfe_restore_context(struct vpfe_ccdc *ccdc)
+{
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_SYNMODE >> 2], VPFE_SYNMODE);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_CULLING >> 2], VPFE_CULLING);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_SDOFST >> 2], VPFE_SDOFST);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_SDR_ADDR >> 2], VPFE_SDR_ADDR);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_CLAMP >> 2], VPFE_CLAMP);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_DCSUB >> 2], VPFE_DCSUB);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_COLPTN >> 2], VPFE_COLPTN);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_BLKCMP >> 2], VPFE_BLKCMP);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_VDINT >> 2], VPFE_VDINT);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_ALAW >> 2], VPFE_ALAW);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_REC656IF >> 2], VPFE_REC656IF);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_CCDCFG >> 2], VPFE_CCDCFG);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_PCR >> 2], VPFE_PCR);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_HD_VD_WID >> 2],
+ VPFE_HD_VD_WID);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_PIX_LINES >> 2],
+ VPFE_PIX_LINES);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_HORZ_INFO >> 2],
+ VPFE_HORZ_INFO);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_VERT_START >> 2],
+ VPFE_VERT_START);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_VERT_LINES >> 2],
+ VPFE_VERT_LINES);
+ vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_HSIZE_OFF >> 2],
+ VPFE_HSIZE_OFF);
+}
+
+static int vpfe_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct vpfe_device *vpfe = platform_get_drvdata(pdev);
+ struct vpfe_ccdc *ccdc = &vpfe->ccdc;
+
+ /* if streaming has not started we don't care */
+ if (!vb2_start_streaming_called(&vpfe->buffer_queue))
+ return 0;
+
+ /* Enable both master and slave clock */
+ pm_runtime_get_sync(dev);
+ vpfe_config_enable(ccdc, 1);
+
+ /* Restore VPFE context */
+ vpfe_restore_context(ccdc);
+
+ vpfe_config_enable(ccdc, 0);
+ pm_runtime_put_sync(dev);
+
+ /* Select default pin state */
+ pinctrl_pm_select_default_state(dev);
+
+ return 0;
+}
+
+#endif
+
+static SIMPLE_DEV_PM_OPS(vpfe_pm_ops, vpfe_suspend, vpfe_resume);
+
+static const struct of_device_id vpfe_of_match[] = {
+ { .compatible = "ti,am437x-vpfe", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, vpfe_of_match);
+
+static struct platform_driver vpfe_driver = {
+ .probe = vpfe_probe,
+ .remove = vpfe_remove,
+ .driver = {
+ .name = VPFE_MODULE_NAME,
+ .pm = &vpfe_pm_ops,
+ .of_match_table = of_match_ptr(vpfe_of_match),
+ },
+};
+
+module_platform_driver(vpfe_driver);
+
+MODULE_AUTHOR("Texas Instruments");
+MODULE_DESCRIPTION("TI AM437x VPFE driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(VPFE_VERSION);
diff --git a/drivers/media/platform/am437x/am437x-vpfe.h b/drivers/media/platform/am437x/am437x-vpfe.h
new file mode 100644
index 000000000000..0f557352313d
--- /dev/null
+++ b/drivers/media/platform/am437x/am437x-vpfe.h
@@ -0,0 +1,283 @@
+/*
+ * Copyright (C) 2013 - 2014 Texas Instruments, Inc.
+ *
+ * Benoit Parrot <bparrot@ti.com>
+ * Lad, Prabhakar <prabhakar.csengg@gmail.com>
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef AM437X_VPFE_H
+#define AM437X_VPFE_H
+
+#include <linux/am437x-vpfe.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/i2c.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "am437x-vpfe_regs.h"
+
+enum vpfe_pin_pol {
+ VPFE_PINPOL_POSITIVE = 0,
+ VPFE_PINPOL_NEGATIVE,
+};
+
+enum vpfe_hw_if_type {
+ /* Raw Bayer */
+ VPFE_RAW_BAYER = 0,
+ /* BT656 - 8 bit */
+ VPFE_BT656,
+ /* BT656 - 10 bit */
+ VPFE_BT656_10BIT,
+ /* YCbCr - 8 bit with external sync */
+ VPFE_YCBCR_SYNC_8,
+ /* YCbCr - 16 bit with external sync */
+ VPFE_YCBCR_SYNC_16,
+};
+
+/* interface description */
+struct vpfe_hw_if_param {
+ enum vpfe_hw_if_type if_type;
+ enum vpfe_pin_pol hdpol;
+ enum vpfe_pin_pol vdpol;
+ unsigned int bus_width;
+};
+
+#define VPFE_MAX_SUBDEV 1
+#define VPFE_MAX_INPUTS 1
+
+struct vpfe_pixel_format {
+ struct v4l2_fmtdesc fmtdesc;
+ /* bytes per pixel */
+ int bpp;
+};
+
+struct vpfe_std_info {
+ int active_pixels;
+ int active_lines;
+ /* current frame format */
+ int frame_format;
+};
+
+struct vpfe_route {
+ u32 input;
+ u32 output;
+};
+
+struct vpfe_subdev_info {
+ char name[32];
+ /* Sub device group id */
+ int grp_id;
+ /* inputs available at the sub device */
+ struct v4l2_input inputs[VPFE_MAX_INPUTS];
+ /* Sub dev routing information for each input */
+ struct vpfe_route *routes;
+ /* check if sub dev supports routing */
+ int can_route;
+ /* ccdc bus/interface configuration */
+ struct vpfe_hw_if_param vpfe_param;
+ struct v4l2_subdev *sd;
+};
+
+struct vpfe_config {
+ /* information about each subdev */
+ struct vpfe_subdev_info sub_devs[VPFE_MAX_SUBDEV];
+ /* Flat array, arranged in groups */
+ struct v4l2_async_subdev *asd[VPFE_MAX_SUBDEV];
+};
+
+struct vpfe_cap_buffer {
+ struct vb2_buffer vb;
+ struct list_head list;
+};
+
+enum ccdc_pixfmt {
+ CCDC_PIXFMT_RAW = 0,
+ CCDC_PIXFMT_YCBCR_16BIT,
+ CCDC_PIXFMT_YCBCR_8BIT,
+};
+
+enum ccdc_frmfmt {
+ CCDC_FRMFMT_PROGRESSIVE = 0,
+ CCDC_FRMFMT_INTERLACED,
+};
+
+/* PIXEL ORDER IN MEMORY from LSB to MSB */
+/* only applicable for 8-bit input mode */
+enum ccdc_pixorder {
+ CCDC_PIXORDER_YCBYCR,
+ CCDC_PIXORDER_CBYCRY,
+};
+
+enum ccdc_buftype {
+ CCDC_BUFTYPE_FLD_INTERLEAVED,
+ CCDC_BUFTYPE_FLD_SEPARATED
+};
+
+/* returns the highest bit used for the gamma */
+static inline u8 ccdc_gamma_width_max_bit(enum vpfe_ccdc_gamma_width width)
+{
+ return 15 - width;
+}
+
+/* returns the highest bit used for this data size */
+static inline u8 ccdc_data_size_max_bit(enum vpfe_ccdc_data_size sz)
+{
+ return sz == VPFE_CCDC_DATA_8BITS ? 7 : 15 - sz;
+}
+
+/* Structure for CCDC configuration parameters for raw capture mode */
+struct ccdc_params_raw {
+ /* pixel format */
+ enum ccdc_pixfmt pix_fmt;
+ /* progressive or interlaced frame */
+ enum ccdc_frmfmt frm_fmt;
+ struct v4l2_rect win;
+	/* Current Format Bytes Per Pixel */
+ unsigned int bytesperpixel;
+ /* Current Format Bytes per Lines
+ * (Aligned to 32 bytes) used for HORZ_INFO
+ */
+ unsigned int bytesperline;
+ /* field id polarity */
+ enum vpfe_pin_pol fid_pol;
+ /* vertical sync polarity */
+ enum vpfe_pin_pol vd_pol;
+ /* horizontal sync polarity */
+ enum vpfe_pin_pol hd_pol;
+ /* interleaved or separated fields */
+ enum ccdc_buftype buf_type;
+ /*
+ * enable to store the image in inverse
+	 * order in memory (bottom to top)
+ */
+ unsigned char image_invert_enable;
+ /* configurable parameters */
+ struct vpfe_ccdc_config_params_raw config_params;
+};
+
+struct ccdc_params_ycbcr {
+ /* pixel format */
+ enum ccdc_pixfmt pix_fmt;
+ /* progressive or interlaced frame */
+ enum ccdc_frmfmt frm_fmt;
+ struct v4l2_rect win;
+	/* Current Format Bytes Per Pixel */
+ unsigned int bytesperpixel;
+ /* Current Format Bytes per Lines
+ * (Aligned to 32 bytes) used for HORZ_INFO
+ */
+ unsigned int bytesperline;
+ /* field id polarity */
+ enum vpfe_pin_pol fid_pol;
+ /* vertical sync polarity */
+ enum vpfe_pin_pol vd_pol;
+ /* horizontal sync polarity */
+ enum vpfe_pin_pol hd_pol;
+ /* enable BT.656 embedded sync mode */
+ int bt656_enable;
+ /* cb:y:cr:y or y:cb:y:cr in memory */
+ enum ccdc_pixorder pix_order;
+ /* interleaved or separated fields */
+ enum ccdc_buftype buf_type;
+};
+
+/*
+ * CCDC operational configuration
+ */
+struct ccdc_config {
+ /* CCDC interface type */
+ enum vpfe_hw_if_type if_type;
+ /* Raw Bayer configuration */
+ struct ccdc_params_raw bayer;
+ /* YCbCr configuration */
+ struct ccdc_params_ycbcr ycbcr;
+ /* ccdc base address */
+ void __iomem *base_addr;
+};
+
+struct vpfe_ccdc {
+ struct ccdc_config ccdc_cfg;
+ u32 ccdc_ctx[VPFE_REG_END / sizeof(u32)];
+};
+
+struct vpfe_device {
+ /* V4l2 specific parameters */
+ /* Identifies video device for this channel */
+ struct video_device *video_dev;
+ /* sub devices */
+ struct v4l2_subdev **sd;
+ /* vpfe cfg */
+ struct vpfe_config *cfg;
+ /* V4l2 device */
+ struct v4l2_device v4l2_dev;
+ /* parent device */
+ struct device *pdev;
+ /* subdevice async Notifier */
+ struct v4l2_async_notifier notifier;
+	/* Indicates id of the field which is being captured */
+ unsigned field;
+ unsigned sequence;
+ /* current interface type */
+ struct vpfe_hw_if_param vpfe_if_params;
+ /* ptr to currently selected sub device */
+ struct vpfe_subdev_info *current_subdev;
+ /* current input at the sub device */
+ int current_input;
+ /* Keeps track of the information about the standard */
+ struct vpfe_std_info std_info;
+ /* std index into std table */
+ int std_index;
+	/* IRQ used when the CCDC outputs to SDRAM */
+ unsigned int irq;
+	/* Pointer to the current v4l2_buffer */
+ struct vpfe_cap_buffer *cur_frm;
+	/* Pointer to the next v4l2_buffer */
+ struct vpfe_cap_buffer *next_frm;
+ /* Used to store pixel format */
+ struct v4l2_format fmt;
+ /* Used to store current bytes per pixel based on current format */
+ unsigned int bpp;
+ /*
+ * used when IMP is chained to store the crop window which
+ * is different from the image window
+ */
+ struct v4l2_rect crop;
+	/* Buffer queue used by videobuf2 */
+ struct vb2_queue buffer_queue;
+ /* Allocator-specific contexts for each plane */
+ struct vb2_alloc_ctx *alloc_ctx;
+ /* Queue of filled frames */
+ struct list_head dma_queue;
+ /* IRQ lock for DMA queue */
+ spinlock_t dma_queue_lock;
+ /* lock used to access this structure */
+ struct mutex lock;
+ /*
+	 * offset where the second field starts from the start of the
+	 * buffer for field-separated YCbCr formats
+ */
+ u32 field_off;
+ struct vpfe_ccdc ccdc;
+};
+
+#endif /* AM437X_VPFE_H */
diff --git a/drivers/media/platform/am437x/am437x-vpfe_regs.h b/drivers/media/platform/am437x/am437x-vpfe_regs.h
new file mode 100644
index 000000000000..4a0ed29723e8
--- /dev/null
+++ b/drivers/media/platform/am437x/am437x-vpfe_regs.h
@@ -0,0 +1,140 @@
+/*
+ * TI AM437x Image Sensor Interface Registers
+ *
+ * Copyright (C) 2013 - 2014 Texas Instruments, Inc.
+ *
+ * Benoit Parrot <bparrot@ti.com>
+ * Lad, Prabhakar <prabhakar.csengg@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef AM437X_VPFE_REGS_H
+#define AM437X_VPFE_REGS_H
+
+/* VPFE module register offset */
+#define VPFE_REVISION 0x0
+#define VPFE_PCR 0x4
+#define VPFE_SYNMODE 0x8
+#define VPFE_HD_VD_WID 0xc
+#define VPFE_PIX_LINES 0x10
+#define VPFE_HORZ_INFO 0x14
+#define VPFE_VERT_START 0x18
+#define VPFE_VERT_LINES 0x1c
+#define VPFE_CULLING 0x20
+#define VPFE_HSIZE_OFF 0x24
+#define VPFE_SDOFST 0x28
+#define VPFE_SDR_ADDR 0x2c
+#define VPFE_CLAMP 0x30
+#define VPFE_DCSUB 0x34
+#define VPFE_COLPTN 0x38
+#define VPFE_BLKCMP 0x3c
+#define VPFE_VDINT 0x48
+#define VPFE_ALAW 0x4c
+#define VPFE_REC656IF 0x50
+#define VPFE_CCDCFG 0x54
+#define VPFE_DMA_CNTL 0x98
+#define VPFE_SYSCONFIG 0x104
+#define VPFE_CONFIG 0x108
+#define VPFE_IRQ_EOI 0x110
+#define VPFE_IRQ_STS_RAW 0x114
+#define VPFE_IRQ_STS 0x118
+#define VPFE_IRQ_EN_SET 0x11c
+#define VPFE_IRQ_EN_CLR 0x120
+#define VPFE_REG_END 0x124
+
+/* Define bit fields within selected registers */
+#define VPFE_FID_POL_MASK 1
+#define VPFE_FID_POL_SHIFT 4
+#define VPFE_HD_POL_MASK 1
+#define VPFE_HD_POL_SHIFT 3
+#define VPFE_VD_POL_MASK 1
+#define VPFE_VD_POL_SHIFT 2
+#define VPFE_HSIZE_OFF_MASK 0xffffffe0
+#define VPFE_32BYTE_ALIGN_VAL 31
+#define VPFE_FRM_FMT_MASK 0x1
+#define VPFE_FRM_FMT_SHIFT 7
+#define VPFE_DATA_SZ_MASK 7
+#define VPFE_DATA_SZ_SHIFT 8
+#define VPFE_PIX_FMT_MASK 3
+#define VPFE_PIX_FMT_SHIFT 12
+#define VPFE_VP2SDR_DISABLE 0xfffbffff
+#define VPFE_WEN_ENABLE (1 << 17)
+#define VPFE_SDR2RSZ_DISABLE 0xfff7ffff
+#define VPFE_VDHDEN_ENABLE (1 << 16)
+#define VPFE_LPF_ENABLE (1 << 14)
+#define VPFE_ALAW_ENABLE (1 << 3)
+#define VPFE_ALAW_GAMMA_WD_MASK 7
+#define VPFE_BLK_CLAMP_ENABLE (1 << 31)
+#define VPFE_BLK_SGAIN_MASK 0x1f
+#define VPFE_BLK_ST_PXL_MASK 0x7fff
+#define VPFE_BLK_ST_PXL_SHIFT 10
+#define VPFE_BLK_SAMPLE_LN_MASK 7
+#define VPFE_BLK_SAMPLE_LN_SHIFT 28
+#define VPFE_BLK_SAMPLE_LINE_MASK 7
+#define VPFE_BLK_SAMPLE_LINE_SHIFT 25
+#define VPFE_BLK_DC_SUB_MASK 0x03fff
+#define VPFE_BLK_COMP_MASK 0xff
+#define VPFE_BLK_COMP_GB_COMP_SHIFT 8
+#define VPFE_BLK_COMP_GR_COMP_SHIFT 16
+#define VPFE_BLK_COMP_R_COMP_SHIFT 24
+#define VPFE_LATCH_ON_VSYNC_DISABLE (1 << 15)
+#define VPFE_DATA_PACK_ENABLE (1 << 11)
+#define VPFE_HORZ_INFO_SPH_SHIFT 16
+#define VPFE_VERT_START_SLV0_SHIFT 16
+#define VPFE_VDINT_VDINT0_SHIFT 16
+#define VPFE_VDINT_VDINT1_MASK 0xffff
+#define VPFE_PPC_RAW 1
+#define VPFE_DCSUB_DEFAULT_VAL 0
+#define VPFE_CLAMP_DEFAULT_VAL 0
+#define VPFE_COLPTN_VAL 0xbb11bb11
+#define VPFE_TWO_BYTES_PER_PIXEL 2
+#define VPFE_INTERLACED_IMAGE_INVERT 0x4b6d
+#define VPFE_INTERLACED_NO_IMAGE_INVERT 0x0249
+#define VPFE_PROGRESSIVE_IMAGE_INVERT 0x4000
+#define VPFE_PROGRESSIVE_NO_IMAGE_INVERT 0
+#define VPFE_INTERLACED_HEIGHT_SHIFT 1
+#define VPFE_SYN_MODE_INPMOD_SHIFT 12
+#define VPFE_SYN_MODE_INPMOD_MASK 3
+#define VPFE_SYN_MODE_8BITS (7 << 8)
+#define VPFE_SYN_MODE_10BITS (6 << 8)
+#define VPFE_SYN_MODE_11BITS (5 << 8)
+#define VPFE_SYN_MODE_12BITS (4 << 8)
+#define VPFE_SYN_MODE_13BITS (3 << 8)
+#define VPFE_SYN_MODE_14BITS (2 << 8)
+#define VPFE_SYN_MODE_15BITS (1 << 8)
+#define VPFE_SYN_MODE_16BITS (0 << 8)
+#define VPFE_SYN_FLDMODE_MASK 1
+#define VPFE_SYN_FLDMODE_SHIFT 7
+#define VPFE_REC656IF_BT656_EN 3
+#define VPFE_SYN_MODE_VD_POL_NEGATIVE (1 << 2)
+#define VPFE_CCDCFG_Y8POS_SHIFT 11
+#define VPFE_CCDCFG_BW656_10BIT (1 << 5)
+#define VPFE_SDOFST_FIELD_INTERLEAVED 0x249
+#define VPFE_NO_CULLING 0xffff00ff
+#define VPFE_VDINT0 (1 << 0)
+#define VPFE_VDINT1 (1 << 1)
+#define VPFE_VDINT2 (1 << 2)
+#define VPFE_DMA_CNTL_OVERFLOW (1 << 31)
+
+#define VPFE_CONFIG_PCLK_INV_SHIFT 0
+#define VPFE_CONFIG_PCLK_INV_MASK 1
+#define VPFE_CONFIG_PCLK_INV_NOT_INV 0
+#define VPFE_CONFIG_PCLK_INV_INV 1
+#define VPFE_CONFIG_EN_SHIFT 1
+#define VPFE_CONFIG_EN_MASK 2
+#define VPFE_CONFIG_EN_DISABLE 0
+#define VPFE_CONFIG_EN_ENABLE 1
+#define VPFE_CONFIG_ST_SHIFT 2
+#define VPFE_CONFIG_ST_MASK 4
+#define VPFE_CONFIG_ST_OCP_ACTIVE 0
+#define VPFE_CONFIG_ST_OCP_STANDBY 1
+
+#endif /* AM437X_VPFE_REGS_H */
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
index b4029ae293d3..856b542b35b9 100644
--- a/drivers/media/platform/coda/coda-bit.c
+++ b/drivers/media/platform/coda/coda-bit.c
@@ -718,6 +718,7 @@ static int coda_start_encoding(struct coda_ctx *ctx)
struct vb2_buffer *buf;
int gamma, ret, value;
u32 dst_fourcc;
+ int num_fb;
u32 stride;
q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
@@ -983,12 +984,14 @@ static int coda_start_encoding(struct coda_ctx *ctx)
v4l2_err(v4l2_dev, "failed to allocate framebuffers\n");
goto out;
}
+ num_fb = 2;
stride = q_data_src->bytesperline;
} else {
ctx->num_internal_frames = 0;
+ num_fb = 0;
stride = 0;
}
- coda_write(dev, ctx->num_internal_frames, CODA_CMD_SET_FRAME_BUF_NUM);
+ coda_write(dev, num_fb, CODA_CMD_SET_FRAME_BUF_NUM);
coda_write(dev, stride, CODA_CMD_SET_FRAME_BUF_STRIDE);
if (dev->devtype->product == CODA_7541) {
@@ -1316,8 +1319,10 @@ static void coda_seq_end_work(struct work_struct *work)
static void coda_bit_release(struct coda_ctx *ctx)
{
+ mutex_lock(&ctx->buffer_mutex);
coda_free_framebuffers(ctx);
coda_free_context_buffers(ctx);
+ mutex_unlock(&ctx->buffer_mutex);
}
const struct coda_context_ops coda_bit_encode_ops = {
@@ -1431,9 +1436,10 @@ static int __coda_start_decoding(struct coda_ctx *ctx)
height = val & CODA7_PICHEIGHT_MASK;
}
- if (width > q_data_dst->width || height > q_data_dst->height) {
+ if (width > q_data_dst->bytesperline || height > q_data_dst->height) {
v4l2_err(&dev->v4l2_dev, "stream is %dx%d, not %dx%d\n",
- width, height, q_data_dst->width, q_data_dst->height);
+ width, height, q_data_dst->bytesperline,
+ q_data_dst->height);
return -EINVAL;
}
@@ -1565,6 +1571,7 @@ static int coda_prepare_decode(struct coda_ctx *ctx)
struct vb2_buffer *dst_buf;
struct coda_dev *dev = ctx->dev;
struct coda_q_data *q_data_dst;
+ struct coda_buffer_meta *meta;
u32 reg_addr, reg_stride;
dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
@@ -1643,12 +1650,12 @@ static int coda_prepare_decode(struct coda_ctx *ctx)
coda_write(dev, ctx->iram_info.axi_sram_use,
CODA7_REG_BIT_AXI_SRAM_USE);
- if (ctx->codec->src_fourcc == V4L2_PIX_FMT_JPEG) {
- struct coda_buffer_meta *meta;
+ meta = list_first_entry_or_null(&ctx->buffer_meta_list,
+ struct coda_buffer_meta, list);
+
+ if (meta && ctx->codec->src_fourcc == V4L2_PIX_FMT_JPEG) {
/* If this is the last buffer in the bitstream, add padding */
- meta = list_first_entry(&ctx->buffer_meta_list,
- struct coda_buffer_meta, list);
if (meta->end == (ctx->bitstream_fifo.kfifo.in &
ctx->bitstream_fifo.kfifo.mask)) {
static unsigned char buf[512];
@@ -1665,6 +1672,9 @@ static int coda_prepare_decode(struct coda_ctx *ctx)
coda_kfifo_sync_to_device_full(ctx);
+ /* Clear decode success flag */
+ coda_write(dev, 0, CODA_RET_DEC_PIC_SUCCESS);
+
coda_command_async(ctx, CODA_COMMAND_PIC_RUN);
return 0;
@@ -1821,6 +1831,7 @@ static void coda_finish_decode(struct coda_ctx *ctx)
memset(&ctx->frame_metas[decoded_idx], 0,
sizeof(struct coda_buffer_meta));
ctx->frame_metas[decoded_idx].sequence = val;
+ ctx->sequence_offset++;
}
mutex_unlock(&ctx->bitstream_mutex);
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index 39330a70f752..6f32e6d6b156 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -37,6 +37,7 @@
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>
+#include <media/videobuf2-vmalloc.h>
#include "coda.h"
@@ -180,6 +181,7 @@ struct coda_video_device {
const char *name;
enum coda_inst_type type;
const struct coda_context_ops *ops;
+ bool direct;
u32 src_formats[CODA_MAX_FORMATS];
u32 dst_formats[CODA_MAX_FORMATS];
};
@@ -468,6 +470,18 @@ static int coda_try_pixelformat(struct coda_ctx *ctx, struct v4l2_format *f)
return 0;
}
+static unsigned int coda_estimate_sizeimage(struct coda_ctx *ctx, u32 sizeimage,
+ u32 width, u32 height)
+{
+ /*
+ * This is a rough estimate for sensible compressed buffer
+ * sizes (between 1 and 16 bits per pixel). This could be
+ * improved by better format specific worst case estimates.
+ */
+ return round_up(clamp(sizeimage, width * height / 8,
+ width * height * 2), PAGE_SIZE);
+}
+
static int coda_try_fmt(struct coda_ctx *ctx, const struct coda_codec *codec,
struct v4l2_format *f)
{
@@ -513,15 +527,10 @@ static int coda_try_fmt(struct coda_ctx *ctx, const struct coda_codec *codec,
case V4L2_PIX_FMT_H264:
case V4L2_PIX_FMT_MPEG4:
f->fmt.pix.bytesperline = 0;
- /*
- * This is a rough estimate for sensible compressed buffer
- * sizes (between 1 and 16 bits per pixel). This could be
- * improved by better format specific worst case estimates.
- */
- f->fmt.pix.sizeimage = round_up(clamp(f->fmt.pix.sizeimage,
- f->fmt.pix.width * f->fmt.pix.height / 8,
- f->fmt.pix.width * f->fmt.pix.height * 2),
- PAGE_SIZE);
+ f->fmt.pix.sizeimage = coda_estimate_sizeimage(ctx,
+ f->fmt.pix.sizeimage,
+ f->fmt.pix.width,
+ f->fmt.pix.height);
break;
default:
BUG();
@@ -592,7 +601,11 @@ static int coda_try_fmt_vid_out(struct file *file, void *priv,
if (ret < 0)
return ret;
- if (!f->fmt.pix.colorspace) {
+ switch (f->fmt.pix.colorspace) {
+ case V4L2_COLORSPACE_REC709:
+ case V4L2_COLORSPACE_JPEG:
+ break;
+ default:
if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_JPEG)
f->fmt.pix.colorspace = V4L2_COLORSPACE_JPEG;
else
@@ -670,6 +683,7 @@ static int coda_s_fmt_vid_out(struct file *file, void *priv,
ctx->colorspace = f->fmt.pix.colorspace;
+ memset(&f_cap, 0, sizeof(f_cap));
f_cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
coda_g_fmt(file, priv, &f_cap);
f_cap.fmt.pix.width = f->fmt.pix.width;
@@ -908,7 +922,8 @@ static void coda_pic_run_work(struct work_struct *work)
ctx->ops->finish_run(ctx);
}
- if (ctx->aborting || (!ctx->streamon_cap && !ctx->streamon_out))
+ if ((ctx->aborting || (!ctx->streamon_cap && !ctx->streamon_out)) &&
+ ctx->ops->seq_end_work)
queue_work(dev->workqueue, &ctx->seq_end_work);
mutex_unlock(&dev->coda_mutex);
@@ -939,15 +954,43 @@ static int coda_job_ready(void *m2m_priv)
return 0;
}
- if (ctx->hold ||
- ((ctx->inst_type == CODA_INST_DECODER) &&
- !v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) &&
- (coda_get_bitstream_payload(ctx) < 512) &&
- !(ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG))) {
- v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
- "%d: not ready: not enough bitstream data.\n",
- ctx->idx);
- return 0;
+ if (ctx->inst_type == CODA_INST_DECODER && ctx->use_bit) {
+ struct list_head *meta;
+ bool stream_end;
+ int num_metas;
+ int src_bufs;
+
+ if (ctx->hold && !v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx)) {
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "%d: not ready: on hold for more buffers.\n",
+ ctx->idx);
+ return 0;
+ }
+
+ stream_end = ctx->bit_stream_param &
+ CODA_BIT_STREAM_END_FLAG;
+
+ num_metas = 0;
+ list_for_each(meta, &ctx->buffer_meta_list)
+ num_metas++;
+
+ src_bufs = v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx);
+
+ if (!stream_end && (num_metas + src_bufs) < 2) {
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "%d: not ready: need 2 buffers available (%d, %d)\n",
+ ctx->idx, num_metas, src_bufs);
+ return 0;
+ }
+
+ if (!v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) &&
+ !stream_end && (coda_get_bitstream_payload(ctx) < 512)) {
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "%d: not ready: not enough bitstream data (%d).\n",
+ ctx->idx, coda_get_bitstream_payload(ctx));
+ return 0;
+ }
}
if (ctx->aborting) {
@@ -1023,13 +1066,14 @@ static void coda_set_tiled_map_type(struct coda_ctx *ctx, int tiled_map_type)
static void set_default_params(struct coda_ctx *ctx)
{
- unsigned int max_w, max_h, size;
+ unsigned int max_w, max_h, usize, csize;
ctx->codec = coda_find_codec(ctx->dev, ctx->cvd->src_formats[0],
ctx->cvd->dst_formats[0]);
max_w = min(ctx->codec->max_w, 1920U);
max_h = min(ctx->codec->max_h, 1088U);
- size = max_w * max_h * 3 / 2;
+ usize = max_w * max_h * 3 / 2;
+ csize = coda_estimate_sizeimage(ctx, usize, max_w, max_h);
ctx->params.codec_mode = ctx->codec->mode;
ctx->colorspace = V4L2_COLORSPACE_REC709;
@@ -1044,14 +1088,14 @@ static void set_default_params(struct coda_ctx *ctx)
ctx->q_data[V4L2_M2M_DST].height = max_h;
if (ctx->codec->src_fourcc == V4L2_PIX_FMT_YUV420) {
ctx->q_data[V4L2_M2M_SRC].bytesperline = max_w;
- ctx->q_data[V4L2_M2M_SRC].sizeimage = size;
+ ctx->q_data[V4L2_M2M_SRC].sizeimage = usize;
ctx->q_data[V4L2_M2M_DST].bytesperline = 0;
- ctx->q_data[V4L2_M2M_DST].sizeimage = round_up(size, PAGE_SIZE);
+ ctx->q_data[V4L2_M2M_DST].sizeimage = csize;
} else {
ctx->q_data[V4L2_M2M_SRC].bytesperline = 0;
- ctx->q_data[V4L2_M2M_SRC].sizeimage = round_up(size, PAGE_SIZE);
+ ctx->q_data[V4L2_M2M_SRC].sizeimage = csize;
ctx->q_data[V4L2_M2M_DST].bytesperline = max_w;
- ctx->q_data[V4L2_M2M_DST].sizeimage = size;
+ ctx->q_data[V4L2_M2M_DST].sizeimage = usize;
}
ctx->q_data[V4L2_M2M_SRC].rect.width = max_w;
ctx->q_data[V4L2_M2M_SRC].rect.height = max_h;
@@ -1080,6 +1124,7 @@ static int coda_queue_setup(struct vb2_queue *vq,
*nplanes = 1;
sizes[0] = size;
+ /* Set to vb2-dma-contig allocator context, ignored by vb2-vmalloc */
alloc_ctxs[0] = ctx->dev->alloc_ctx;
v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
@@ -1109,6 +1154,7 @@ static int coda_buf_prepare(struct vb2_buffer *vb)
static void coda_buf_queue(struct vb2_buffer *vb)
{
struct coda_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_queue *vq = vb->vb2_queue;
struct coda_q_data *q_data;
q_data = get_q_data(ctx, vb->vb2_queue->type);
@@ -1117,8 +1163,7 @@ static void coda_buf_queue(struct vb2_buffer *vb)
* In the decoder case, immediately try to copy the buffer into the
* bitstream ringbuffer and mark it as ready to be dequeued.
*/
- if (ctx->inst_type == CODA_INST_DECODER &&
- vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ if (ctx->bitstream.size && vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
/*
* For backwards compatibility, queuing an empty buffer marks
* the stream end
@@ -1218,7 +1263,7 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
return 0;
/* Allow BIT decoder device_run with no new buffers queued */
- if (ctx->inst_type == CODA_INST_DECODER)
+ if (ctx->inst_type == CODA_INST_DECODER && ctx->use_bit)
v4l2_m2m_set_src_buffered(ctx->fh.m2m_ctx, true);
ctx->gopcounter = ctx->params.gop_size - 1;
@@ -1271,7 +1316,7 @@ static void coda_stop_streaming(struct vb2_queue *q)
coda_bit_stream_end_flag(ctx);
- ctx->isequence = 0;
+ ctx->qsequence = 0;
while ((buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
@@ -1290,6 +1335,10 @@ static void coda_stop_streaming(struct vb2_queue *q)
if (!ctx->streamon_out && !ctx->streamon_cap) {
struct coda_buffer_meta *meta;
+ if (ctx->ops->seq_end_work) {
+ queue_work(dev->workqueue, &ctx->seq_end_work);
+ flush_work(&ctx->seq_end_work);
+ }
mutex_lock(&ctx->bitstream_mutex);
while (!list_empty(&ctx->buffer_meta_list)) {
meta = list_first_entry(&ctx->buffer_meta_list,
@@ -1300,6 +1349,7 @@ static void coda_stop_streaming(struct vb2_queue *q)
mutex_unlock(&ctx->bitstream_mutex);
kfifo_init(&ctx->bitstream_fifo,
ctx->bitstream.vaddr, ctx->bitstream.size);
+ ctx->initialized = 0;
ctx->runcounter = 0;
ctx->aborting = 0;
}
@@ -1521,8 +1571,8 @@ int coda_decoder_queue_init(void *priv, struct vb2_queue *src_vq,
int ret;
src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
- src_vq->io_modes = VB2_DMABUF | VB2_MMAP;
- src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->io_modes = VB2_DMABUF | VB2_MMAP | VB2_USERPTR;
+ src_vq->mem_ops = &vb2_vmalloc_memops;
ret = coda_queue_init(priv, src_vq);
if (ret)
@@ -1577,9 +1627,11 @@ static int coda_open(struct file *file)
ctx->cvd = to_coda_video_device(vdev);
ctx->inst_type = ctx->cvd->type;
ctx->ops = ctx->cvd->ops;
+ ctx->use_bit = !ctx->cvd->direct;
init_completion(&ctx->completion);
INIT_WORK(&ctx->pic_run_work, coda_pic_run_work);
- INIT_WORK(&ctx->seq_end_work, ctx->ops->seq_end_work);
+ if (ctx->ops->seq_end_work)
+ INIT_WORK(&ctx->seq_end_work, ctx->ops->seq_end_work);
v4l2_fh_init(&ctx->fh, video_devdata(file));
file->private_data = &ctx->fh;
v4l2_fh_add(&ctx->fh);
@@ -1630,22 +1682,25 @@ static int coda_open(struct file *file)
ctx->fh.ctrl_handler = &ctx->ctrls;
- ret = coda_alloc_context_buf(ctx, &ctx->parabuf,
- CODA_PARA_BUF_SIZE, "parabuf");
- if (ret < 0) {
- v4l2_err(&dev->v4l2_dev, "failed to allocate parabuf");
- goto err_dma_alloc;
+ if (ctx->use_bit) {
+ ret = coda_alloc_context_buf(ctx, &ctx->parabuf,
+ CODA_PARA_BUF_SIZE, "parabuf");
+ if (ret < 0) {
+ v4l2_err(&dev->v4l2_dev, "failed to allocate parabuf");
+ goto err_dma_alloc;
+ }
}
-
- ctx->bitstream.size = CODA_MAX_FRAME_SIZE;
- ctx->bitstream.vaddr = dma_alloc_writecombine(
- &dev->plat_dev->dev, ctx->bitstream.size,
- &ctx->bitstream.paddr, GFP_KERNEL);
- if (!ctx->bitstream.vaddr) {
- v4l2_err(&dev->v4l2_dev,
- "failed to allocate bitstream ringbuffer");
- ret = -ENOMEM;
- goto err_dma_writecombine;
+ if (ctx->use_bit && ctx->inst_type == CODA_INST_DECODER) {
+ ctx->bitstream.size = CODA_MAX_FRAME_SIZE;
+ ctx->bitstream.vaddr = dma_alloc_writecombine(
+ &dev->plat_dev->dev, ctx->bitstream.size,
+ &ctx->bitstream.paddr, GFP_KERNEL);
+ if (!ctx->bitstream.vaddr) {
+ v4l2_err(&dev->v4l2_dev,
+ "failed to allocate bitstream ringbuffer");
+ ret = -ENOMEM;
+ goto err_dma_writecombine;
+ }
}
kfifo_init(&ctx->bitstream_fifo,
ctx->bitstream.vaddr, ctx->bitstream.size);
@@ -1693,16 +1748,14 @@ static int coda_release(struct file *file)
v4l2_dbg(1, coda_debug, &dev->v4l2_dev, "Releasing instance %p\n",
ctx);
- debugfs_remove_recursive(ctx->debugfs_entry);
-
- if (ctx->inst_type == CODA_INST_DECODER)
+ if (ctx->inst_type == CODA_INST_DECODER && ctx->use_bit)
coda_bit_stream_end_flag(ctx);
/* If this instance is running, call .job_abort and wait for it to end */
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
/* In case the instance was not running, we still need to call SEQ_END */
- if (ctx->initialized) {
+ if (ctx->initialized && ctx->ops->seq_end_work) {
queue_work(dev->workqueue, &ctx->seq_end_work);
flush_work(&ctx->seq_end_work);
}
@@ -1728,6 +1781,7 @@ static int coda_release(struct file *file)
clear_bit(ctx->idx, &dev->instance_mask);
if (ctx->ops->release)
ctx->ops->release(ctx);
+ debugfs_remove_recursive(ctx->debugfs_entry);
kfree(ctx);
return 0;
@@ -1844,10 +1898,11 @@ static int coda_register_device(struct coda_dev *dev, int i)
{
struct video_device *vfd = &dev->vfd[i];
- if (i > ARRAY_SIZE(dev->vfd))
+ if (i >= dev->devtype->num_vdevs)
return -EINVAL;
- snprintf(vfd->name, sizeof(vfd->name), dev->devtype->vdevs[i]->name);
+ snprintf(vfd->name, sizeof(vfd->name), "%s",
+ dev->devtype->vdevs[i]->name);
vfd->fops = &coda_fops;
vfd->ioctl_ops = &coda_ioctl_ops;
vfd->release = video_device_release_empty,
@@ -2001,7 +2056,6 @@ static const struct coda_devtype coda_devdata[] = {
static struct platform_device_id coda_platform_ids[] = {
{ .name = "coda-imx27", .driver_data = CODA_IMX27 },
- { .name = "coda-imx53", .driver_data = CODA_IMX53 },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, coda_platform_ids);
@@ -2142,6 +2196,7 @@ static int coda_probe(struct platform_device *pdev)
if (!dev->iram.vaddr) {
dev_warn(&pdev->dev, "unable to alloc iram\n");
} else {
+ memset(dev->iram.vaddr, 0, dev->iram.size);
dev->iram.blob.data = dev->iram.vaddr;
dev->iram.blob.size = dev->iram.size;
dev->iram.dentry = debugfs_create_blob("iram", 0644,
diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/coda/coda.h
index 5dd47e5f97c1..0c35cd5032ff 100644
--- a/drivers/media/platform/coda/coda.h
+++ b/drivers/media/platform/coda/coda.h
@@ -198,7 +198,6 @@ struct coda_ctx {
int initialized;
int streamon_out;
int streamon_cap;
- u32 isequence;
u32 qsequence;
u32 osequence;
u32 sequence_offset;
@@ -236,6 +235,7 @@ struct coda_ctx {
u32 frame_mem_ctrl;
int display_idx;
struct dentry *debugfs_entry;
+ bool use_bit;
};
extern int coda_debug;
diff --git a/drivers/media/platform/coda/coda_regs.h b/drivers/media/platform/coda/coda_regs.h
index 8e015b8aa8fa..7d026241171b 100644
--- a/drivers/media/platform/coda/coda_regs.h
+++ b/drivers/media/platform/coda/coda_regs.h
@@ -304,9 +304,9 @@
#define CODA_RATECONTROL_AUTOSKIP_OFFSET 31
#define CODA_RATECONTROL_AUTOSKIP_MASK 0x01
#define CODA_RATECONTROL_INITIALDELAY_OFFSET 16
-#define CODA_RATECONTROL_INITIALDELAY_MASK 0x7f
+#define CODA_RATECONTROL_INITIALDELAY_MASK 0x7fff
#define CODA_RATECONTROL_BITRATE_OFFSET 1
-#define CODA_RATECONTROL_BITRATE_MASK 0x7f
+#define CODA_RATECONTROL_BITRATE_MASK 0x7fff
#define CODA_RATECONTROL_ENABLE_OFFSET 0
#define CODA_RATECONTROL_ENABLE_MASK 0x01
#define CODA_CMD_ENC_SEQ_RC_BUF_SIZE 0x1b0
diff --git a/drivers/media/platform/davinci/Kconfig b/drivers/media/platform/davinci/Kconfig
index d9e1ddb586b1..469e9d28cec0 100644
--- a/drivers/media/platform/davinci/Kconfig
+++ b/drivers/media/platform/davinci/Kconfig
@@ -1,6 +1,6 @@
config VIDEO_DAVINCI_VPIF_DISPLAY
tristate "TI DaVinci VPIF V4L2-Display driver"
- depends on VIDEO_DEV
+ depends on VIDEO_V4L2
depends on ARCH_DAVINCI || COMPILE_TEST
depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
@@ -16,7 +16,7 @@ config VIDEO_DAVINCI_VPIF_DISPLAY
config VIDEO_DAVINCI_VPIF_CAPTURE
tristate "TI DaVinci VPIF video capture driver"
- depends on VIDEO_DEV
+ depends on VIDEO_V4L2
depends on ARCH_DAVINCI || COMPILE_TEST
depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
@@ -75,7 +75,7 @@ config VIDEO_DM365_ISIF
config VIDEO_DAVINCI_VPBE_DISPLAY
tristate "TI DaVinci VPBE V4L2-Display driver"
- depends on ARCH_DAVINCI
+ depends on VIDEO_V4L2 && ARCH_DAVINCI
depends on HAS_DMA
select VIDEOBUF2_DMA_CONTIG
help
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.h b/drivers/media/platform/exynos-gsc/gsc-core.h
index 0abdb17fb19c..fa572aacdb3f 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.h
+++ b/drivers/media/platform/exynos-gsc/gsc-core.h
@@ -466,18 +466,6 @@ static inline void gsc_hw_clear_irq(struct gsc_dev *dev, int irq)
writel(cfg, dev->regs + GSC_IRQ);
}
-static inline void gsc_lock(struct vb2_queue *vq)
-{
- struct gsc_ctx *ctx = vb2_get_drv_priv(vq);
- mutex_lock(&ctx->gsc_dev->lock);
-}
-
-static inline void gsc_unlock(struct vb2_queue *vq)
-{
- struct gsc_ctx *ctx = vb2_get_drv_priv(vq);
- mutex_unlock(&ctx->gsc_dev->lock);
-}
-
static inline bool gsc_ctx_state_is_set(u32 mask, struct gsc_ctx *ctx)
{
unsigned long flags;
diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
index 74e1de637e8f..d5cffef2e227 100644
--- a/drivers/media/platform/exynos-gsc/gsc-m2m.c
+++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
@@ -267,8 +267,8 @@ static struct vb2_ops gsc_m2m_qops = {
.queue_setup = gsc_m2m_queue_setup,
.buf_prepare = gsc_m2m_buf_prepare,
.buf_queue = gsc_m2m_buf_queue,
- .wait_prepare = gsc_unlock,
- .wait_finish = gsc_lock,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
.stop_streaming = gsc_m2m_stop_streaming,
.start_streaming = gsc_m2m_start_streaming,
};
@@ -590,6 +590,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->gsc_dev->lock;
ret = vb2_queue_init(src_vq);
if (ret)
@@ -603,6 +604,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->gsc_dev->lock;
return vb2_queue_init(dst_vq);
}
diff --git a/drivers/media/platform/marvell-ccic/Kconfig b/drivers/media/platform/marvell-ccic/Kconfig
index 6265d36adceb..4bf5bd1e90d6 100644
--- a/drivers/media/platform/marvell-ccic/Kconfig
+++ b/drivers/media/platform/marvell-ccic/Kconfig
@@ -5,6 +5,7 @@ config VIDEO_CAFE_CCIC
select VIDEO_OV7670
select VIDEOBUF2_VMALLOC
select VIDEOBUF2_DMA_CONTIG
+ select VIDEOBUF2_DMA_SG
---help---
This is a video4linux2 driver for the Marvell 88ALP01 integrated
CMOS camera controller. This is the controller found on first-
@@ -13,7 +14,7 @@ config VIDEO_CAFE_CCIC
config VIDEO_MMP_CAMERA
tristate "Marvell Armada 610 integrated camera controller support"
depends on ARCH_MMP && I2C && VIDEO_V4L2
- depends on HAS_DMA
+ depends on HAS_DMA && BROKEN
select VIDEO_OV7670
select I2C_GPIO
select VIDEOBUF2_DMA_SG
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
index 193373ff268d..dd5b1415f974 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.c
+++ b/drivers/media/platform/marvell-ccic/mcam-core.c
@@ -1913,7 +1913,6 @@ int mccic_register(struct mcam_camera *cam)
mutex_lock(&cam->s_mutex);
cam->vdev = mcam_v4l_template;
- cam->vdev.debug = 0;
cam->vdev.v4l2_dev = &cam->v4l2_dev;
video_set_drvdata(&cam->vdev, cam);
ret = video_register_device(&cam->vdev, VFL_TYPE_GRABBER, -1);
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 51c2129bdcc6..deca80903c3a 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -220,6 +220,9 @@ static u32 isp_xclk_calc_divider(unsigned long *rate, unsigned long parent_rate)
return ISPTCTRL_CTRL_DIV_BYPASS;
}
+ if (*rate == 0)
+ *rate = 1;
+
divider = DIV_ROUND_CLOSEST(parent_rate, *rate);
if (divider >= ISPTCTRL_CTRL_DIV_BYPASS)
divider = ISPTCTRL_CTRL_DIV_BYPASS - 1;
diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c
index aa40c8269ab8..54479d60cc0d 100644
--- a/drivers/media/platform/s3c-camif/camif-capture.c
+++ b/drivers/media/platform/s3c-camif/camif-capture.c
@@ -536,24 +536,12 @@ static void buffer_queue(struct vb2_buffer *vb)
spin_unlock_irqrestore(&camif->slock, flags);
}
-static void camif_lock(struct vb2_queue *vq)
-{
- struct camif_vp *vp = vb2_get_drv_priv(vq);
- mutex_lock(&vp->camif->lock);
-}
-
-static void camif_unlock(struct vb2_queue *vq)
-{
- struct camif_vp *vp = vb2_get_drv_priv(vq);
- mutex_unlock(&vp->camif->lock);
-}
-
static const struct vb2_ops s3c_camif_qops = {
.queue_setup = queue_setup,
.buf_prepare = buffer_prepare,
.buf_queue = buffer_queue,
- .wait_prepare = camif_unlock,
- .wait_finish = camif_lock,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
.start_streaming = start_streaming,
.stop_streaming = stop_streaming,
};
@@ -1161,6 +1149,7 @@ int s3c_camif_register_video_node(struct camif_dev *camif, int idx)
q->buf_struct_size = sizeof(struct camif_buffer);
q->drv_priv = vp;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &vp->camif->lock;
ret = vb2_queue_init(q);
if (ret)
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
index 47ba8fbb0426..ec3e1248923d 100644
--- a/drivers/media/platform/s5p-g2d/g2d.c
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -12,7 +12,6 @@
#include <linux/module.h>
#include <linux/fs.h>
-#include <linux/version.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/slab.h>
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index fbfdf03b9054..8e44a59d8ec2 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -810,6 +810,7 @@ static int s5p_mfc_open(struct file *file)
q = &ctx->vq_dst;
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
q->drv_priv = &ctx->fh;
+ q->lock = &dev->mfc_mutex;
if (vdev == dev->vfd_dec) {
q->io_modes = VB2_MMAP;
q->ops = get_dec_queue_ops();
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
index c6c3452ccca1..aebe4fd7f03a 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
@@ -18,7 +18,6 @@
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <linux/version.h>
#include <linux/videodev2.h>
#include <linux/workqueue.h>
#include <media/v4l2-ctrls.h>
@@ -813,7 +812,7 @@ static int vidioc_decoder_cmd(struct file *file, void *priv,
unsigned long flags;
switch (cmd->cmd) {
- case V4L2_ENC_CMD_STOP:
+ case V4L2_DEC_CMD_STOP:
if (cmd->flags != 0)
return -EINVAL;
@@ -944,22 +943,6 @@ static int s5p_mfc_queue_setup(struct vb2_queue *vq,
return 0;
}
-static void s5p_mfc_unlock(struct vb2_queue *q)
-{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
- struct s5p_mfc_dev *dev = ctx->dev;
-
- mutex_unlock(&dev->mfc_mutex);
-}
-
-static void s5p_mfc_lock(struct vb2_queue *q)
-{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
- struct s5p_mfc_dev *dev = ctx->dev;
-
- mutex_lock(&dev->mfc_mutex);
-}
-
static int s5p_mfc_buf_init(struct vb2_buffer *vb)
{
struct vb2_queue *vq = vb->vb2_queue;
@@ -1107,8 +1090,8 @@ static void s5p_mfc_buf_queue(struct vb2_buffer *vb)
static struct vb2_ops s5p_mfc_dec_qops = {
.queue_setup = s5p_mfc_queue_setup,
- .wait_prepare = s5p_mfc_unlock,
- .wait_finish = s5p_mfc_lock,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
.buf_init = s5p_mfc_buf_init,
.start_streaming = s5p_mfc_start_streaming,
.stop_streaming = s5p_mfc_stop_streaming,
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index bd64f1dcbdb5..e65993f4b901 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -19,7 +19,6 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
-#include <linux/version.h>
#include <linux/videodev2.h>
#include <media/v4l2-event.h>
#include <linux/workqueue.h>
@@ -1867,22 +1866,6 @@ static int s5p_mfc_queue_setup(struct vb2_queue *vq,
return 0;
}
-static void s5p_mfc_unlock(struct vb2_queue *q)
-{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
- struct s5p_mfc_dev *dev = ctx->dev;
-
- mutex_unlock(&dev->mfc_mutex);
-}
-
-static void s5p_mfc_lock(struct vb2_queue *q)
-{
- struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
- struct s5p_mfc_dev *dev = ctx->dev;
-
- mutex_lock(&dev->mfc_mutex);
-}
-
static int s5p_mfc_buf_init(struct vb2_buffer *vb)
{
struct vb2_queue *vq = vb->vb2_queue;
@@ -2052,8 +2035,8 @@ static void s5p_mfc_buf_queue(struct vb2_buffer *vb)
static struct vb2_ops s5p_mfc_enc_qops = {
.queue_setup = s5p_mfc_queue_setup,
- .wait_prepare = s5p_mfc_unlock,
- .wait_finish = s5p_mfc_lock,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
.buf_init = s5p_mfc_buf_init,
.buf_prepare = s5p_mfc_buf_prepare,
.start_streaming = s5p_mfc_start_streaming,
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
index 9aea179943ce..d826c58b5d53 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
@@ -1340,11 +1340,7 @@ static int s5p_mfc_init_decode_v6(struct s5p_mfc_ctx *ctx)
/* FMO_ASO_CTRL - 0: Enable, 1: Disable */
reg |= (fmo_aso_ctrl << S5P_FIMV_D_OPT_FMO_ASO_CTRL_MASK_V6);
- /* When user sets desplay_delay to 0,
- * It works as "display_delay enable" and delay set to 0.
- * If user wants display_delay disable, It should be
- * set to negative value. */
- if (ctx->display_delay >= 0) {
+ if (ctx->display_delay_enable) {
reg |= (0x1 << S5P_FIMV_D_OPT_DDELAY_EN_SHIFT_V6);
writel(ctx->display_delay, mfc_regs->d_display_delay);
}
diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
index b4d2696501e4..72d4f2e1efc0 100644
--- a/drivers/media/platform/s5p-tv/mixer_video.c
+++ b/drivers/media/platform/s5p-tv/mixer_video.c
@@ -926,22 +926,6 @@ static void buf_queue(struct vb2_buffer *vb)
mxr_dbg(mdev, "queuing buffer\n");
}
-static void wait_lock(struct vb2_queue *vq)
-{
- struct mxr_layer *layer = vb2_get_drv_priv(vq);
-
- mxr_dbg(layer->mdev, "%s\n", __func__);
- mutex_lock(&layer->mutex);
-}
-
-static void wait_unlock(struct vb2_queue *vq)
-{
- struct mxr_layer *layer = vb2_get_drv_priv(vq);
-
- mxr_dbg(layer->mdev, "%s\n", __func__);
- mutex_unlock(&layer->mutex);
-}
-
static int start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct mxr_layer *layer = vb2_get_drv_priv(vq);
@@ -1040,8 +1024,8 @@ static void stop_streaming(struct vb2_queue *vq)
static struct vb2_ops mxr_video_qops = {
.queue_setup = queue_setup,
.buf_queue = buf_queue,
- .wait_prepare = wait_unlock,
- .wait_finish = wait_lock,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
.start_streaming = start_streaming,
.stop_streaming = stop_streaming,
};
@@ -1122,6 +1106,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
.ops = &mxr_video_qops,
.min_buffers_needed = 1,
.mem_ops = &vb2_dma_contig_memops,
+ .lock = &layer->mutex,
};
return layer;
diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c
index aaa1f6f25a29..a901b6248557 100644
--- a/drivers/media/platform/sh_veu.c
+++ b/drivers/media/platform/sh_veu.c
@@ -242,20 +242,6 @@ static void sh_veu_job_abort(void *priv)
veu->aborting = true;
}
-static void sh_veu_lock(void *priv)
-{
- struct sh_veu_dev *veu = priv;
-
- mutex_lock(&veu->fop_lock);
-}
-
-static void sh_veu_unlock(void *priv)
-{
- struct sh_veu_dev *veu = priv;
-
- mutex_unlock(&veu->fop_lock);
-}
-
static void sh_veu_process(struct sh_veu_dev *veu,
struct vb2_buffer *src_buf,
struct vb2_buffer *dst_buf)
@@ -950,36 +936,28 @@ static void sh_veu_buf_queue(struct vb2_buffer *vb)
v4l2_m2m_buf_queue(veu->m2m_ctx, vb);
}
-static void sh_veu_wait_prepare(struct vb2_queue *q)
-{
- sh_veu_unlock(vb2_get_drv_priv(q));
-}
-
-static void sh_veu_wait_finish(struct vb2_queue *q)
-{
- sh_veu_lock(vb2_get_drv_priv(q));
-}
-
static const struct vb2_ops sh_veu_qops = {
.queue_setup = sh_veu_queue_setup,
.buf_prepare = sh_veu_buf_prepare,
.buf_queue = sh_veu_buf_queue,
- .wait_prepare = sh_veu_wait_prepare,
- .wait_finish = sh_veu_wait_finish,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
static int sh_veu_queue_init(void *priv, struct vb2_queue *src_vq,
struct vb2_queue *dst_vq)
{
+ struct sh_veu_dev *veu = priv;
int ret;
memset(src_vq, 0, sizeof(*src_vq));
src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
- src_vq->drv_priv = priv;
+ src_vq->drv_priv = veu;
src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
src_vq->ops = &sh_veu_qops;
src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->lock = &veu->fop_lock;
ret = vb2_queue_init(src_vq);
if (ret < 0)
@@ -988,10 +966,11 @@ static int sh_veu_queue_init(void *priv, struct vb2_queue *src_vq,
memset(dst_vq, 0, sizeof(*dst_vq));
dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
- dst_vq->drv_priv = priv;
+ dst_vq->drv_priv = veu;
dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
dst_vq->ops = &sh_veu_qops;
dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->lock = &veu->fop_lock;
return vb2_queue_init(dst_vq);
}
diff --git a/drivers/media/platform/soc_camera/atmel-isi.c b/drivers/media/platform/soc_camera/atmel-isi.c
index 6d885239b16a..8526bf5c8429 100644
--- a/drivers/media/platform/soc_camera/atmel-isi.c
+++ b/drivers/media/platform/soc_camera/atmel-isi.c
@@ -455,8 +455,8 @@ static struct vb2_ops isi_video_qops = {
.buf_queue = buffer_queue,
.start_streaming = start_streaming,
.stop_streaming = stop_streaming,
- .wait_prepare = soc_camera_unlock,
- .wait_finish = soc_camera_lock,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
/* ------------------------------------------------------------------
@@ -465,6 +465,8 @@ static struct vb2_ops isi_video_qops = {
static int isi_camera_init_videobuf(struct vb2_queue *q,
struct soc_camera_device *icd)
{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
q->io_modes = VB2_MMAP;
q->drv_priv = icd;
@@ -472,6 +474,7 @@ static int isi_camera_init_videobuf(struct vb2_queue *q,
q->ops = &isi_video_qops;
q->mem_ops = &vb2_dma_contig_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &ici->host_lock;
return vb2_queue_init(q);
}
diff --git a/drivers/media/platform/soc_camera/mx3_camera.c b/drivers/media/platform/soc_camera/mx3_camera.c
index 0b3299dee05d..3435fd2ca8ec 100644
--- a/drivers/media/platform/soc_camera/mx3_camera.c
+++ b/drivers/media/platform/soc_camera/mx3_camera.c
@@ -435,14 +435,16 @@ static struct vb2_ops mx3_videobuf_ops = {
.buf_queue = mx3_videobuf_queue,
.buf_cleanup = mx3_videobuf_release,
.buf_init = mx3_videobuf_init,
- .wait_prepare = soc_camera_unlock,
- .wait_finish = soc_camera_lock,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
.stop_streaming = mx3_stop_streaming,
};
static int mx3_camera_init_videobuf(struct vb2_queue *q,
struct soc_camera_device *icd)
{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
q->io_modes = VB2_MMAP | VB2_USERPTR;
q->drv_priv = icd;
@@ -450,6 +452,7 @@ static int mx3_camera_init_videobuf(struct vb2_queue *q,
q->mem_ops = &vb2_dma_contig_memops;
q->buf_struct_size = sizeof(struct mx3_camera_buffer);
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &ici->host_lock;
return vb2_queue_init(q);
}
diff --git a/drivers/media/platform/soc_camera/rcar_vin.c b/drivers/media/platform/soc_camera/rcar_vin.c
index 9f1473c0a0cf..279ab9f6ae38 100644
--- a/drivers/media/platform/soc_camera/rcar_vin.c
+++ b/drivers/media/platform/soc_camera/rcar_vin.c
@@ -804,62 +804,26 @@ error:
vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
}
-static void rcar_vin_videobuf_release(struct vb2_buffer *vb)
+/*
+ * Wait for capture to stop and all in-flight buffers to be finished with by
+ * the video hardware. This must be called under &priv->lock.
+ */
+static void rcar_vin_wait_stop_streaming(struct rcar_vin_priv *priv)
{
- struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- struct rcar_vin_priv *priv = ici->priv;
- unsigned int i;
- int buf_in_use = 0;
-
- spin_lock_irq(&priv->lock);
-
- /* Is the buffer in use by the VIN hardware? */
- for (i = 0; i < MAX_BUFFER_NUM; i++) {
- if (priv->queue_buf[i] == vb) {
- buf_in_use = 1;
- break;
- }
- }
-
- if (buf_in_use) {
- while (priv->state != STOPPED) {
-
- /* issue stop if running */
- if (priv->state == RUNNING)
- rcar_vin_request_capture_stop(priv);
+ while (priv->state != STOPPED) {
+ /* issue stop if running */
+ if (priv->state == RUNNING)
+ rcar_vin_request_capture_stop(priv);
- /* wait until capturing has been stopped */
- if (priv->state == STOPPING) {
- priv->request_to_stop = true;
- spin_unlock_irq(&priv->lock);
- wait_for_completion(&priv->capture_stop);
- spin_lock_irq(&priv->lock);
- }
- }
- /*
- * Capturing has now stopped. The buffer we have been asked
- * to release could be any of the current buffers in use, so
- * release all buffers that are in use by HW
- */
- for (i = 0; i < MAX_BUFFER_NUM; i++) {
- if (priv->queue_buf[i]) {
- vb2_buffer_done(priv->queue_buf[i],
- VB2_BUF_STATE_ERROR);
- priv->queue_buf[i] = NULL;
- }
+ /* wait until capturing has been stopped */
+ if (priv->state == STOPPING) {
+ priv->request_to_stop = true;
+ spin_unlock_irq(&priv->lock);
+ wait_for_completion(&priv->capture_stop);
+ spin_lock_irq(&priv->lock);
}
- } else {
- list_del_init(to_buf_list(vb));
}
-
- spin_unlock_irq(&priv->lock);
-}
-
-static int rcar_vin_videobuf_init(struct vb2_buffer *vb)
-{
- INIT_LIST_HEAD(to_buf_list(vb));
- return 0;
}
static void rcar_vin_stop_streaming(struct vb2_queue *vq)
@@ -868,21 +832,34 @@ static void rcar_vin_stop_streaming(struct vb2_queue *vq)
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct rcar_vin_priv *priv = ici->priv;
struct list_head *buf_head, *tmp;
+ int i;
spin_lock_irq(&priv->lock);
- list_for_each_safe(buf_head, tmp, &priv->capture)
+ rcar_vin_wait_stop_streaming(priv);
+
+ for (i = 0; i < MAX_BUFFER_NUM; i++) {
+ if (priv->queue_buf[i]) {
+ vb2_buffer_done(priv->queue_buf[i],
+ VB2_BUF_STATE_ERROR);
+ priv->queue_buf[i] = NULL;
+ }
+ }
+
+ list_for_each_safe(buf_head, tmp, &priv->capture) {
+ vb2_buffer_done(&list_entry(buf_head,
+ struct rcar_vin_buffer, list)->vb,
+ VB2_BUF_STATE_ERROR);
list_del_init(buf_head);
+ }
spin_unlock_irq(&priv->lock);
}
static struct vb2_ops rcar_vin_vb2_ops = {
.queue_setup = rcar_vin_videobuf_setup,
- .buf_init = rcar_vin_videobuf_init,
- .buf_cleanup = rcar_vin_videobuf_release,
.buf_queue = rcar_vin_videobuf_queue,
.stop_streaming = rcar_vin_stop_streaming,
- .wait_prepare = soc_camera_unlock,
- .wait_finish = soc_camera_lock,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
static irqreturn_t rcar_vin_irq(int irq, void *data)
@@ -1808,6 +1785,8 @@ static int rcar_vin_querycap(struct soc_camera_host *ici,
static int rcar_vin_init_videobuf2(struct vb2_queue *vq,
struct soc_camera_device *icd)
{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
vq->io_modes = VB2_MMAP | VB2_USERPTR;
vq->drv_priv = icd;
@@ -1815,6 +1794,7 @@ static int rcar_vin_init_videobuf2(struct vb2_queue *vq,
vq->mem_ops = &vb2_dma_contig_memops;
vq->buf_struct_size = sizeof(struct rcar_vin_buffer);
vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ vq->lock = &ici->host_lock;
return vb2_queue_init(vq);
}
diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
index 71787702d4a2..9ce202f53934 100644
--- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
+++ b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
@@ -496,8 +496,8 @@ static struct vb2_ops sh_mobile_ceu_videobuf_ops = {
.buf_queue = sh_mobile_ceu_videobuf_queue,
.buf_cleanup = sh_mobile_ceu_videobuf_release,
.buf_init = sh_mobile_ceu_videobuf_init,
- .wait_prepare = soc_camera_unlock,
- .wait_finish = soc_camera_lock,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
.stop_streaming = sh_mobile_ceu_stop_streaming,
};
@@ -1661,6 +1661,8 @@ static int sh_mobile_ceu_querycap(struct soc_camera_host *ici,
static int sh_mobile_ceu_init_videobuf(struct vb2_queue *q,
struct soc_camera_device *icd)
{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
q->io_modes = VB2_MMAP | VB2_USERPTR;
q->drv_priv = icd;
@@ -1668,6 +1670,7 @@ static int sh_mobile_ceu_init_videobuf(struct vb2_queue *q,
q->mem_ops = &vb2_dma_contig_memops;
q->buf_struct_size = sizeof(struct sh_mobile_ceu_buffer);
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &ici->host_lock;
return vb2_queue_init(q);
}
diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c
index b3db51c82bde..cee7b56f8404 100644
--- a/drivers/media/platform/soc_camera/soc_camera.c
+++ b/drivers/media/platform/soc_camera/soc_camera.c
@@ -843,22 +843,6 @@ static unsigned int soc_camera_poll(struct file *file, poll_table *pt)
return res;
}
-void soc_camera_lock(struct vb2_queue *vq)
-{
- struct soc_camera_device *icd = vb2_get_drv_priv(vq);
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- mutex_lock(&ici->host_lock);
-}
-EXPORT_SYMBOL(soc_camera_lock);
-
-void soc_camera_unlock(struct vb2_queue *vq)
-{
- struct soc_camera_device *icd = vb2_get_drv_priv(vq);
- struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
- mutex_unlock(&ici->host_lock);
-}
-EXPORT_SYMBOL(soc_camera_unlock);
-
static struct v4l2_file_operations soc_camera_fops = {
.owner = THIS_MODULE,
.open = soc_camera_open,
@@ -1813,8 +1797,6 @@ eadddev:
mutex_unlock(&ici->clk_lock);
}
eadd:
- video_device_release(icd->vdev);
- icd->vdev = NULL;
if (icd->vdev) {
video_device_release(icd->vdev);
icd->vdev = NULL;
diff --git a/drivers/media/platform/ti-vpe/vpe.c b/drivers/media/platform/ti-vpe/vpe.c
index d628d1a7cf9e..c44760b705da 100644
--- a/drivers/media/platform/ti-vpe/vpe.c
+++ b/drivers/media/platform/ti-vpe/vpe.c
@@ -25,6 +25,7 @@
#include <linux/io.h>
#include <linux/ioctl.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
@@ -74,7 +75,7 @@
#define VPE_DEF_BUFS_PER_JOB 1 /* default one buffer per batch job */
/*
- * each VPE context can need up to 3 config desciptors, 7 input descriptors,
+ * each VPE context can need up to 3 config descriptors, 7 input descriptors,
* 3 output descriptors, and 10 control descriptors
*/
#define VPE_DESC_LIST_SIZE (10 * VPDMA_DTD_DESC_SIZE + \
@@ -373,7 +374,6 @@ struct vpe_dev {
struct vpe_ctx {
struct v4l2_fh fh;
struct vpe_dev *dev;
- struct v4l2_m2m_ctx *m2m_ctx;
struct v4l2_ctrl_handler hdl;
unsigned int field; /* current field */
@@ -887,10 +887,10 @@ static int job_ready(void *priv)
if (ctx->deinterlacing && ctx->src_vbs[2] == NULL)
needed += 2; /* need additional two most recent fields */
- if (v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) < needed)
+ if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) < needed)
return 0;
- if (v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) < needed)
+ if (v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) < needed)
return 0;
return 1;
@@ -1100,15 +1100,15 @@ static void device_run(void *priv)
struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];
if (ctx->deinterlacing && ctx->src_vbs[2] == NULL) {
- ctx->src_vbs[2] = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ ctx->src_vbs[2] = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
WARN_ON(ctx->src_vbs[2] == NULL);
- ctx->src_vbs[1] = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ ctx->src_vbs[1] = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
WARN_ON(ctx->src_vbs[1] == NULL);
}
- ctx->src_vbs[0] = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ ctx->src_vbs[0] = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
WARN_ON(ctx->src_vbs[0] == NULL);
- ctx->dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+ ctx->dst_vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
WARN_ON(ctx->dst_vb == NULL);
/* config descriptors */
@@ -1334,7 +1334,7 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
finished:
vpe_dbg(ctx->dev, "finishing transaction\n");
ctx->bufs_completed = 0;
- v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);
+ v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
handled:
return IRQ_HANDLED;
}
@@ -1395,7 +1395,7 @@ static int vpe_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
struct vpe_q_data *q_data;
int i;
- vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
if (!vq)
return -EINVAL;
@@ -1527,7 +1527,7 @@ static int __vpe_s_fmt(struct vpe_ctx *ctx, struct v4l2_format *f)
struct vb2_queue *vq;
int i;
- vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
if (!vq)
return -EINVAL;
@@ -1739,52 +1739,6 @@ static int vpe_s_selection(struct file *file, void *fh,
return set_srcdst_params(ctx);
}
-static int vpe_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *reqbufs)
-{
- struct vpe_ctx *ctx = file2ctx(file);
-
- return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
-}
-
-static int vpe_querybuf(struct file *file, void *priv, struct v4l2_buffer *buf)
-{
- struct vpe_ctx *ctx = file2ctx(file);
-
- return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
-}
-
-static int vpe_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
-{
- struct vpe_ctx *ctx = file2ctx(file);
-
- return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
-}
-
-static int vpe_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
-{
- struct vpe_ctx *ctx = file2ctx(file);
-
- return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
-}
-
-static int vpe_streamon(struct file *file, void *priv, enum v4l2_buf_type type)
-{
- struct vpe_ctx *ctx = file2ctx(file);
-
- return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
-}
-
-static int vpe_streamoff(struct file *file, void *priv, enum v4l2_buf_type type)
-{
- struct vpe_ctx *ctx = file2ctx(file);
-
- vpe_dump_regs(ctx->dev);
- vpdma_dump_regs(ctx->dev->vpdma);
-
- return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
-}
-
/*
* defines number of buffers/frames a context can process with VPE before
* switching to a different context. default value is 1 buffer per context
@@ -1814,14 +1768,14 @@ static const struct v4l2_ctrl_ops vpe_ctrl_ops = {
};
static const struct v4l2_ioctl_ops vpe_ioctl_ops = {
- .vidioc_querycap = vpe_querycap,
+ .vidioc_querycap = vpe_querycap,
- .vidioc_enum_fmt_vid_cap_mplane = vpe_enum_fmt,
+ .vidioc_enum_fmt_vid_cap_mplane = vpe_enum_fmt,
.vidioc_g_fmt_vid_cap_mplane = vpe_g_fmt,
.vidioc_try_fmt_vid_cap_mplane = vpe_try_fmt,
.vidioc_s_fmt_vid_cap_mplane = vpe_s_fmt,
- .vidioc_enum_fmt_vid_out_mplane = vpe_enum_fmt,
+ .vidioc_enum_fmt_vid_out_mplane = vpe_enum_fmt,
.vidioc_g_fmt_vid_out_mplane = vpe_g_fmt,
.vidioc_try_fmt_vid_out_mplane = vpe_try_fmt,
.vidioc_s_fmt_vid_out_mplane = vpe_s_fmt,
@@ -1829,16 +1783,15 @@ static const struct v4l2_ioctl_ops vpe_ioctl_ops = {
.vidioc_g_selection = vpe_g_selection,
.vidioc_s_selection = vpe_s_selection,
- .vidioc_reqbufs = vpe_reqbufs,
- .vidioc_querybuf = vpe_querybuf,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
- .vidioc_qbuf = vpe_qbuf,
- .vidioc_dqbuf = vpe_dqbuf,
-
- .vidioc_streamon = vpe_streamon,
- .vidioc_streamoff = vpe_streamoff,
- .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
- .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
/*
@@ -1910,33 +1863,40 @@ static int vpe_buf_prepare(struct vb2_buffer *vb)
static void vpe_buf_queue(struct vb2_buffer *vb)
{
struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
- v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
}
-static void vpe_wait_prepare(struct vb2_queue *q)
+static int vpe_start_streaming(struct vb2_queue *q, unsigned int count)
{
- struct vpe_ctx *ctx = vb2_get_drv_priv(q);
- vpe_unlock(ctx);
+ /* currently we do nothing here */
+
+ return 0;
}
-static void vpe_wait_finish(struct vb2_queue *q)
+static void vpe_stop_streaming(struct vb2_queue *q)
{
struct vpe_ctx *ctx = vb2_get_drv_priv(q);
- vpe_lock(ctx);
+
+ vpe_dump_regs(ctx->dev);
+ vpdma_dump_regs(ctx->dev->vpdma);
}
static struct vb2_ops vpe_qops = {
.queue_setup = vpe_queue_setup,
.buf_prepare = vpe_buf_prepare,
.buf_queue = vpe_buf_queue,
- .wait_prepare = vpe_wait_prepare,
- .wait_finish = vpe_wait_finish,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = vpe_start_streaming,
+ .stop_streaming = vpe_stop_streaming,
};
static int queue_init(void *priv, struct vb2_queue *src_vq,
struct vb2_queue *dst_vq)
{
struct vpe_ctx *ctx = priv;
+ struct vpe_dev *dev = ctx->dev;
int ret;
memset(src_vq, 0, sizeof(*src_vq));
@@ -1947,6 +1907,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->ops = &vpe_qops;
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &dev->dev_mutex;
ret = vb2_queue_init(src_vq);
if (ret)
@@ -1960,6 +1921,7 @@ static int queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->ops = &vpe_qops;
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &dev->dev_mutex;
return vb2_queue_init(dst_vq);
}
@@ -1981,9 +1943,9 @@ static const struct v4l2_ctrl_config vpe_bufs_per_job = {
static int vpe_open(struct file *file)
{
struct vpe_dev *dev = video_drvdata(file);
- struct vpe_ctx *ctx = NULL;
struct vpe_q_data *s_q_data;
struct v4l2_ctrl_handler *hdl;
+ struct vpe_ctx *ctx;
int ret;
vpe_dbg(dev, "vpe_open\n");
@@ -2056,10 +2018,10 @@ static int vpe_open(struct file *file)
if (ret)
goto exit_fh;
- ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
- if (IS_ERR(ctx->m2m_ctx)) {
- ret = PTR_ERR(ctx->m2m_ctx);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
goto exit_fh;
}
@@ -2078,7 +2040,7 @@ static int vpe_open(struct file *file)
ctx->load_mmrs = true;
vpe_dbg(dev, "created instance %p, m2m_ctx: %p\n",
- ctx, ctx->m2m_ctx);
+ ctx, ctx->fh.m2m_ctx);
mutex_unlock(&dev->dev_mutex);
@@ -2116,7 +2078,7 @@ static int vpe_release(struct file *file)
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
v4l2_ctrl_handler_free(&ctx->hdl);
- v4l2_m2m_ctx_release(ctx->m2m_ctx);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
kfree(ctx);
@@ -2133,39 +2095,13 @@ static int vpe_release(struct file *file)
return 0;
}
-static unsigned int vpe_poll(struct file *file,
- struct poll_table_struct *wait)
-{
- struct vpe_ctx *ctx = file2ctx(file);
- struct vpe_dev *dev = ctx->dev;
- int ret;
-
- mutex_lock(&dev->dev_mutex);
- ret = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
- mutex_unlock(&dev->dev_mutex);
- return ret;
-}
-
-static int vpe_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct vpe_ctx *ctx = file2ctx(file);
- struct vpe_dev *dev = ctx->dev;
- int ret;
-
- if (mutex_lock_interruptible(&dev->dev_mutex))
- return -ERESTARTSYS;
- ret = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
- mutex_unlock(&dev->dev_mutex);
- return ret;
-}
-
static const struct v4l2_file_operations vpe_fops = {
.owner = THIS_MODULE,
.open = vpe_open,
.release = vpe_release,
- .poll = vpe_poll,
+ .poll = v4l2_m2m_fop_poll,
.unlocked_ioctl = video_ioctl2,
- .mmap = vpe_mmap,
+ .mmap = v4l2_m2m_fop_mmap,
};
static struct video_device vpe_videodev = {
@@ -2367,8 +2303,6 @@ static const struct of_device_id vpe_of_match[] = {
},
{},
};
-#else
-#define vpe_of_match NULL
#endif
static struct platform_driver vpe_pdrv = {
@@ -2376,7 +2310,7 @@ static struct platform_driver vpe_pdrv = {
.remove = vpe_remove,
.driver = {
.name = VPE_MODULE_NAME,
- .of_match_table = vpe_of_match,
+ .of_match_table = of_match_ptr(vpe_of_match),
},
};
diff --git a/drivers/media/platform/vivid/vivid-ctrls.c b/drivers/media/platform/vivid/vivid-ctrls.c
index 857e7866e8bc..32a798f2d953 100644
--- a/drivers/media/platform/vivid/vivid-ctrls.c
+++ b/drivers/media/platform/vivid/vivid-ctrls.c
@@ -689,7 +689,7 @@ static const struct v4l2_ctrl_config vivid_ctrl_max_edid_blocks = {
static const char * const vivid_ctrl_colorspace_strings[] = {
"SMPTE 170M",
- "REC 709",
+ "Rec. 709",
"sRGB",
"AdobeRGB",
"BT.2020",
@@ -716,7 +716,7 @@ static const char * const vivid_ctrl_ycbcr_enc_strings[] = {
"xvYCC 601",
"xvYCC 709",
"sYCC",
- "BT.2020 Non-Constant Luminance",
+ "BT.2020",
"BT.2020 Constant Luminance",
"SMPTE 240M",
NULL,
diff --git a/drivers/media/platform/vivid/vivid-tpg.c b/drivers/media/platform/vivid/vivid-tpg.c
index fc9c6536ba02..34493f435d5a 100644
--- a/drivers/media/platform/vivid/vivid-tpg.c
+++ b/drivers/media/platform/vivid/vivid-tpg.c
@@ -352,13 +352,14 @@ static void color_to_ycbcr(struct tpg_data *tpg, int r, int g, int b,
{ COEFF(0.5, 224), COEFF(-0.4629, 224), COEFF(-0.0405, 224) },
};
bool full = tpg->real_quantization == V4L2_QUANTIZATION_FULL_RANGE;
+ unsigned y_offset = full ? 0 : 16;
int lin_y, yc;
switch (tpg->real_ycbcr_enc) {
case V4L2_YCBCR_ENC_601:
case V4L2_YCBCR_ENC_XV601:
case V4L2_YCBCR_ENC_SYCC:
- rgb2ycbcr(full ? bt601_full : bt601, r, g, b, 16, y, cb, cr);
+ rgb2ycbcr(full ? bt601_full : bt601, r, g, b, y_offset, y, cb, cr);
break;
case V4L2_YCBCR_ENC_BT2020:
rgb2ycbcr(bt2020, r, g, b, 16, y, cb, cr);
@@ -384,7 +385,7 @@ static void color_to_ycbcr(struct tpg_data *tpg, int r, int g, int b,
case V4L2_YCBCR_ENC_709:
case V4L2_YCBCR_ENC_XV709:
default:
- rgb2ycbcr(full ? rec709_full : rec709, r, g, b, 0, y, cb, cr);
+ rgb2ycbcr(full ? rec709_full : rec709, r, g, b, y_offset, y, cb, cr);
break;
}
}
@@ -439,13 +440,14 @@ static void ycbcr_to_color(struct tpg_data *tpg, int y, int cb, int cr,
{ COEFF(1, 219), COEFF(1.8814, 224), COEFF(0, 224) },
};
bool full = tpg->real_quantization == V4L2_QUANTIZATION_FULL_RANGE;
+ unsigned y_offset = full ? 0 : 16;
int lin_r, lin_g, lin_b, lin_y;
switch (tpg->real_ycbcr_enc) {
case V4L2_YCBCR_ENC_601:
case V4L2_YCBCR_ENC_XV601:
case V4L2_YCBCR_ENC_SYCC:
- ycbcr2rgb(full ? bt601_full : bt601, y, cb, cr, 16, r, g, b);
+ ycbcr2rgb(full ? bt601_full : bt601, y, cb, cr, y_offset, r, g, b);
break;
case V4L2_YCBCR_ENC_BT2020:
ycbcr2rgb(bt2020, y, cb, cr, 16, r, g, b);
@@ -480,7 +482,7 @@ static void ycbcr_to_color(struct tpg_data *tpg, int y, int cb, int cr,
case V4L2_YCBCR_ENC_709:
case V4L2_YCBCR_ENC_XV709:
default:
- ycbcr2rgb(full ? rec709_full : rec709, y, cb, cr, 16, r, g, b);
+ ycbcr2rgb(full ? rec709_full : rec709, y, cb, cr, y_offset, r, g, b);
break;
}
}
diff --git a/drivers/media/platform/vivid/vivid-tpg.h b/drivers/media/platform/vivid/vivid-tpg.h
index 9dc463a40ed3..bd8b1c760b3f 100644
--- a/drivers/media/platform/vivid/vivid-tpg.h
+++ b/drivers/media/platform/vivid/vivid-tpg.h
@@ -20,7 +20,6 @@
#ifndef _VIVID_TPG_H_
#define _VIVID_TPG_H_
-#include <linux/version.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/random.h>
diff --git a/drivers/media/platform/vsp1/vsp1.h b/drivers/media/platform/vsp1/vsp1.h
index 12467191dff4..989e96f7e360 100644
--- a/drivers/media/platform/vsp1/vsp1.h
+++ b/drivers/media/platform/vsp1/vsp1.h
@@ -16,7 +16,6 @@
#include <linux/io.h>
#include <linux/list.h>
#include <linux/mutex.h>
-#include <linux/platform_data/vsp1.h>
#include <media/media-device.h>
#include <media/v4l2-device.h>
@@ -40,9 +39,20 @@ struct vsp1_uds;
#define VSP1_MAX_UDS 3
#define VSP1_MAX_WPF 4
+#define VSP1_HAS_LIF (1 << 0)
+#define VSP1_HAS_LUT (1 << 1)
+#define VSP1_HAS_SRU (1 << 2)
+
+struct vsp1_platform_data {
+ unsigned int features;
+ unsigned int rpf_count;
+ unsigned int uds_count;
+ unsigned int wpf_count;
+};
+
struct vsp1_device {
struct device *dev;
- struct vsp1_platform_data *pdata;
+ struct vsp1_platform_data pdata;
void __iomem *mmio;
struct clk *clock;
diff --git a/drivers/media/platform/vsp1/vsp1_bru.c b/drivers/media/platform/vsp1/vsp1_bru.c
index b21f381a9862..401e2b77a0b6 100644
--- a/drivers/media/platform/vsp1/vsp1_bru.c
+++ b/drivers/media/platform/vsp1/vsp1_bru.c
@@ -20,7 +20,7 @@
#include "vsp1_bru.h"
#include "vsp1_rwpf.h"
-#define BRU_MIN_SIZE 4U
+#define BRU_MIN_SIZE 1U
#define BRU_MAX_SIZE 8190U
/* -----------------------------------------------------------------------------
diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
index 5eb16e87d53f..913485a90e97 100644
--- a/drivers/media/platform/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/vsp1/vsp1_drv.c
@@ -40,7 +40,7 @@ static irqreturn_t vsp1_irq_handler(int irq, void *data)
irqreturn_t ret = IRQ_NONE;
unsigned int i;
- for (i = 0; i < vsp1->pdata->wpf_count; ++i) {
+ for (i = 0; i < vsp1->pdata.wpf_count; ++i) {
struct vsp1_rwpf *wpf = vsp1->wpf[i];
struct vsp1_pipeline *pipe;
u32 status;
@@ -181,7 +181,7 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
list_add_tail(&vsp1->hst->entity.list_dev, &vsp1->entities);
- if (vsp1->pdata->features & VSP1_HAS_LIF) {
+ if (vsp1->pdata.features & VSP1_HAS_LIF) {
vsp1->lif = vsp1_lif_create(vsp1);
if (IS_ERR(vsp1->lif)) {
ret = PTR_ERR(vsp1->lif);
@@ -191,7 +191,7 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
list_add_tail(&vsp1->lif->entity.list_dev, &vsp1->entities);
}
- if (vsp1->pdata->features & VSP1_HAS_LUT) {
+ if (vsp1->pdata.features & VSP1_HAS_LUT) {
vsp1->lut = vsp1_lut_create(vsp1);
if (IS_ERR(vsp1->lut)) {
ret = PTR_ERR(vsp1->lut);
@@ -201,7 +201,7 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
list_add_tail(&vsp1->lut->entity.list_dev, &vsp1->entities);
}
- for (i = 0; i < vsp1->pdata->rpf_count; ++i) {
+ for (i = 0; i < vsp1->pdata.rpf_count; ++i) {
struct vsp1_rwpf *rpf;
rpf = vsp1_rpf_create(vsp1, i);
@@ -214,7 +214,7 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
list_add_tail(&rpf->entity.list_dev, &vsp1->entities);
}
- if (vsp1->pdata->features & VSP1_HAS_SRU) {
+ if (vsp1->pdata.features & VSP1_HAS_SRU) {
vsp1->sru = vsp1_sru_create(vsp1);
if (IS_ERR(vsp1->sru)) {
ret = PTR_ERR(vsp1->sru);
@@ -224,7 +224,7 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
list_add_tail(&vsp1->sru->entity.list_dev, &vsp1->entities);
}
- for (i = 0; i < vsp1->pdata->uds_count; ++i) {
+ for (i = 0; i < vsp1->pdata.uds_count; ++i) {
struct vsp1_uds *uds;
uds = vsp1_uds_create(vsp1, i);
@@ -237,7 +237,7 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
list_add_tail(&uds->entity.list_dev, &vsp1->entities);
}
- for (i = 0; i < vsp1->pdata->wpf_count; ++i) {
+ for (i = 0; i < vsp1->pdata.wpf_count; ++i) {
struct vsp1_rwpf *wpf;
wpf = vsp1_wpf_create(vsp1, i);
@@ -261,7 +261,7 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
goto done;
}
- if (vsp1->pdata->features & VSP1_HAS_LIF) {
+ if (vsp1->pdata.features & VSP1_HAS_LIF) {
ret = media_entity_create_link(
&vsp1->wpf[0]->entity.subdev.entity, RWPF_PAD_SOURCE,
&vsp1->lif->entity.subdev.entity, LIF_PAD_SINK, 0);
@@ -294,7 +294,7 @@ static int vsp1_device_init(struct vsp1_device *vsp1)
/* Reset any channel that might be running. */
status = vsp1_read(vsp1, VI6_STATUS);
- for (i = 0; i < vsp1->pdata->wpf_count; ++i) {
+ for (i = 0; i < vsp1->pdata.wpf_count; ++i) {
unsigned int timeout;
if (!(status & VI6_STATUS_SYS_ACT(i)))
@@ -318,10 +318,10 @@ static int vsp1_device_init(struct vsp1_device *vsp1)
vsp1_write(vsp1, VI6_CLK_DCSWT, (8 << VI6_CLK_DCSWT_CSTPW_SHIFT) |
(8 << VI6_CLK_DCSWT_CSTRW_SHIFT));
- for (i = 0; i < vsp1->pdata->rpf_count; ++i)
+ for (i = 0; i < vsp1->pdata.rpf_count; ++i)
vsp1_write(vsp1, VI6_DPR_RPF_ROUTE(i), VI6_DPR_NODE_UNUSED);
- for (i = 0; i < vsp1->pdata->uds_count; ++i)
+ for (i = 0; i < vsp1->pdata.uds_count; ++i)
vsp1_write(vsp1, VI6_DPR_UDS_ROUTE(i), VI6_DPR_NODE_UNUSED);
vsp1_write(vsp1, VI6_DPR_SRU_ROUTE, VI6_DPR_NODE_UNUSED);
@@ -428,28 +428,36 @@ static const struct dev_pm_ops vsp1_pm_ops = {
* Platform Driver
*/
-static int vsp1_validate_platform_data(struct platform_device *pdev,
- struct vsp1_platform_data *pdata)
+static int vsp1_parse_dt(struct vsp1_device *vsp1)
{
- if (pdata == NULL) {
- dev_err(&pdev->dev, "missing platform data\n");
- return -EINVAL;
- }
+ struct device_node *np = vsp1->dev->of_node;
+ struct vsp1_platform_data *pdata = &vsp1->pdata;
+
+ if (of_property_read_bool(np, "renesas,has-lif"))
+ pdata->features |= VSP1_HAS_LIF;
+ if (of_property_read_bool(np, "renesas,has-lut"))
+ pdata->features |= VSP1_HAS_LUT;
+ if (of_property_read_bool(np, "renesas,has-sru"))
+ pdata->features |= VSP1_HAS_SRU;
+
+ of_property_read_u32(np, "renesas,#rpf", &pdata->rpf_count);
+ of_property_read_u32(np, "renesas,#uds", &pdata->uds_count);
+ of_property_read_u32(np, "renesas,#wpf", &pdata->wpf_count);
if (pdata->rpf_count <= 0 || pdata->rpf_count > VSP1_MAX_RPF) {
- dev_err(&pdev->dev, "invalid number of RPF (%u)\n",
+ dev_err(vsp1->dev, "invalid number of RPF (%u)\n",
pdata->rpf_count);
return -EINVAL;
}
if (pdata->uds_count <= 0 || pdata->uds_count > VSP1_MAX_UDS) {
- dev_err(&pdev->dev, "invalid number of UDS (%u)\n",
+ dev_err(vsp1->dev, "invalid number of UDS (%u)\n",
pdata->uds_count);
return -EINVAL;
}
if (pdata->wpf_count <= 0 || pdata->wpf_count > VSP1_MAX_WPF) {
- dev_err(&pdev->dev, "invalid number of WPF (%u)\n",
+ dev_err(vsp1->dev, "invalid number of WPF (%u)\n",
pdata->wpf_count);
return -EINVAL;
}
@@ -457,33 +465,6 @@ static int vsp1_validate_platform_data(struct platform_device *pdev,
return 0;
}
-static struct vsp1_platform_data *
-vsp1_get_platform_data(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct vsp1_platform_data *pdata;
-
- if (!IS_ENABLED(CONFIG_OF) || np == NULL)
- return pdev->dev.platform_data;
-
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (pdata == NULL)
- return NULL;
-
- if (of_property_read_bool(np, "renesas,has-lif"))
- pdata->features |= VSP1_HAS_LIF;
- if (of_property_read_bool(np, "renesas,has-lut"))
- pdata->features |= VSP1_HAS_LUT;
- if (of_property_read_bool(np, "renesas,has-sru"))
- pdata->features |= VSP1_HAS_SRU;
-
- of_property_read_u32(np, "renesas,#rpf", &pdata->rpf_count);
- of_property_read_u32(np, "renesas,#uds", &pdata->uds_count);
- of_property_read_u32(np, "renesas,#wpf", &pdata->wpf_count);
-
- return pdata;
-}
-
static int vsp1_probe(struct platform_device *pdev)
{
struct vsp1_device *vsp1;
@@ -499,11 +480,7 @@ static int vsp1_probe(struct platform_device *pdev)
mutex_init(&vsp1->lock);
INIT_LIST_HEAD(&vsp1->entities);
- vsp1->pdata = vsp1_get_platform_data(pdev);
- if (vsp1->pdata == NULL)
- return -ENODEV;
-
- ret = vsp1_validate_platform_data(pdev, vsp1->pdata);
+ ret = vsp1_parse_dt(vsp1);
if (ret < 0)
return ret;
diff --git a/drivers/media/platform/vsp1/vsp1_hsit.c b/drivers/media/platform/vsp1/vsp1_hsit.c
index 80bedc554ee3..0bc0471746c9 100644
--- a/drivers/media/platform/vsp1/vsp1_hsit.c
+++ b/drivers/media/platform/vsp1/vsp1_hsit.c
@@ -26,11 +26,6 @@
* Device Access
*/
-static inline u32 vsp1_hsit_read(struct vsp1_hsit *hsit, u32 reg)
-{
- return vsp1_read(hsit->entity.vsp1, reg);
-}
-
static inline void vsp1_hsit_write(struct vsp1_hsit *hsit, u32 reg, u32 data)
{
vsp1_write(hsit->entity.vsp1, reg, data);
diff --git a/drivers/media/platform/vsp1/vsp1_regs.h b/drivers/media/platform/vsp1/vsp1_regs.h
index 55f163d32d15..da3c573e1efc 100644
--- a/drivers/media/platform/vsp1/vsp1_regs.h
+++ b/drivers/media/platform/vsp1/vsp1_regs.h
@@ -43,12 +43,12 @@
#define VI6_DISP_IRQ_ENB 0x0078
#define VI6_DISP_IRQ_ENB_DSTE (1 << 8)
#define VI6_DISP_IRQ_ENB_MAEE (1 << 5)
-#define VI6_DISP_IRQ_ENB_LNEE(n) (1 << ((n) + 4))
+#define VI6_DISP_IRQ_ENB_LNEE(n) (1 << (n))
#define VI6_DISP_IRQ_STA 0x007c
#define VI6_DISP_IRQ_STA_DSE (1 << 8)
#define VI6_DISP_IRQ_STA_MAE (1 << 5)
-#define VI6_DISP_IRQ_STA_LNE(n) (1 << ((n) + 4))
+#define VI6_DISP_IRQ_STA_LNE(n) (1 << (n))
#define VI6_WPF_LINE_COUNT(n) (0x0084 + (n) * 4)
#define VI6_WPF_LINE_COUNT_MASK (0x1fffff << 0)
diff --git a/drivers/media/platform/vsp1/vsp1_rpf.c b/drivers/media/platform/vsp1/vsp1_rpf.c
index d14d26b718ef..3294529a3108 100644
--- a/drivers/media/platform/vsp1/vsp1_rpf.c
+++ b/drivers/media/platform/vsp1/vsp1_rpf.c
@@ -106,11 +106,22 @@ static int rpf_s_stream(struct v4l2_subdev *subdev, int enable)
+ crop->left * fmtinfo->bpp[0] / 8;
pstride = format->plane_fmt[0].bytesperline
<< VI6_RPF_SRCM_PSTRIDE_Y_SHIFT;
+
+ vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_Y,
+ rpf->buf_addr[0] + rpf->offsets[0]);
+
if (format->num_planes > 1) {
rpf->offsets[1] = crop->top * format->plane_fmt[1].bytesperline
+ crop->left * fmtinfo->bpp[1] / 8;
pstride |= format->plane_fmt[1].bytesperline
<< VI6_RPF_SRCM_PSTRIDE_C_SHIFT;
+
+ vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_C0,
+ rpf->buf_addr[1] + rpf->offsets[1]);
+
+ if (format->num_planes > 2)
+ vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_C1,
+ rpf->buf_addr[2] + rpf->offsets[1]);
}
vsp1_rpf_write(rpf, VI6_RPF_SRCM_PSTRIDE, pstride);
@@ -179,6 +190,13 @@ static void rpf_vdev_queue(struct vsp1_video *video,
struct vsp1_video_buffer *buf)
{
struct vsp1_rwpf *rpf = container_of(video, struct vsp1_rwpf, video);
+ unsigned int i;
+
+ for (i = 0; i < 3; ++i)
+ rpf->buf_addr[i] = buf->addr[i];
+
+ if (!vsp1_entity_is_streaming(&rpf->entity))
+ return;
vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_Y,
buf->addr[0] + rpf->offsets[0]);
diff --git a/drivers/media/platform/vsp1/vsp1_rwpf.h b/drivers/media/platform/vsp1/vsp1_rwpf.h
index 28dd9e7b3838..2cf1f13d3bf9 100644
--- a/drivers/media/platform/vsp1/vsp1_rwpf.h
+++ b/drivers/media/platform/vsp1/vsp1_rwpf.h
@@ -39,6 +39,7 @@ struct vsp1_rwpf {
struct v4l2_rect crop;
unsigned int offsets[2];
+ dma_addr_t buf_addr[3];
};
static inline struct vsp1_rwpf *to_rwpf(struct v4l2_subdev *subdev)
diff --git a/drivers/media/platform/vsp1/vsp1_wpf.c b/drivers/media/platform/vsp1/vsp1_wpf.c
index 6e057762c933..1d2b3a2f1573 100644
--- a/drivers/media/platform/vsp1/vsp1_wpf.c
+++ b/drivers/media/platform/vsp1/vsp1_wpf.c
@@ -92,19 +92,20 @@ static int wpf_s_stream(struct v4l2_subdev *subdev, int enable)
return 0;
}
- /* Sources. If the pipeline has a single input configure it as the
- * master layer. Otherwise configure all inputs as sub-layers and
- * select the virtual RPF as the master layer.
+ /* Sources. If the pipeline has a single input and BRU is not used,
+ * configure it as the master layer. Otherwise configure all
+ * inputs as sub-layers and select the virtual RPF as the master
+ * layer.
*/
for (i = 0; i < pipe->num_inputs; ++i) {
struct vsp1_rwpf *input = pipe->inputs[i];
- srcrpf |= pipe->num_inputs == 1
+ srcrpf |= (!pipe->bru && pipe->num_inputs == 1)
? VI6_WPF_SRCRPF_RPF_ACT_MST(input->entity.index)
: VI6_WPF_SRCRPF_RPF_ACT_SUB(input->entity.index);
}
- if (pipe->num_inputs > 1)
+ if (pipe->bru || pipe->num_inputs > 1)
srcrpf |= VI6_WPF_SRCRPF_VIRACT_MST;
vsp1_wpf_write(wpf, VI6_WPF_SRCRPF, srcrpf);
@@ -280,7 +281,7 @@ struct vsp1_rwpf *vsp1_wpf_create(struct vsp1_device *vsp1, unsigned int index)
* except for the WPF0 source link if a LIF is present.
*/
flags = MEDIA_LNK_FL_ENABLED;
- if (!(vsp1->pdata->features & VSP1_HAS_LIF) || index != 0)
+ if (!(vsp1->pdata.features & VSP1_HAS_LIF) || index != 0)
flags |= MEDIA_LNK_FL_IMMUTABLE;
ret = media_entity_create_link(&wpf->entity.subdev.entity,