This thread has been locked.

If you have a related question, please click the "Ask a related question" button in the top right corner. The newly created question will be automatically linked to this question.

AM62P: ADS6311 — frame loss may occur while switching the radar's current

Part Number: AM62P

Tool/software:

sdk version 10.0.1

We found that during the switching process, frames are lost in the DMA output of our 4-channel video. This causes the data of the four channels to fall out of sync.

The code we use to count frames:

// SPDX-License-Identifier: GPL-2.0-only
/*
 * TI CSI2 RX driver.
 *
 * Copyright (C) 2021 Texas Instruments Incorporated - https://www.ti.com/
 *
 * Author: Pratyush Yadav <p.yadav@ti.com>
 */

#include <linux/bitfield.h>
#include <linux/dmaengine.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/adaps_dtof_uapi.h>

#include <media/mipi-csi2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mc.h>
//#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-dma-sg.h>
#define TI_CSI2RX_MODULE_NAME		"j721e-csi2rx"

/* SHIM (pixel-to-PSI-L bridge) register map, offsets from the shim base. */
#define SHIM_CNTL			0x10
#define SHIM_CNTL_PIX_RST		BIT(0)

/* Per-context DMA control register; one 0x20-byte stride per context. */
#define SHIM_DMACNTX(i)			(0x20 + ((i) * 0x20))
#define SHIM_DMACNTX_EN			BIT(31)
#define SHIM_DMACNTX_YUV422		GENMASK(27, 26)
#define SHIM_DMACNTX_SIZE		GENMASK(21, 20)
#define SHIM_DMACNTX_VC			GENMASK(9, 6)
#define SHIM_DMACNTX_FMT		GENMASK(5, 0)
#define SHIM_DMACNTX_YUV422_MODE_11	3
#define SHIM_DMACNTX_SIZE_8		0
#define SHIM_DMACNTX_SIZE_16		1
#define SHIM_DMACNTX_SIZE_32		2

/* Per-context PSI-L routing tags. */
#define SHIM_PSI_CFG0(i)		(0x24 + ((i) * 0x20))
#define SHIM_PSI_CFG0_SRC_TAG		GENMASK(15, 0)
#define SHIM_PSI_CFG0_DST_TAG		GENMASK(31, 16)

#define PSIL_WORD_SIZE_BYTES		16
#define TI_CSI2RX_MAX_CTX		32

/*
 * There are no hard limits on the width or height. The DMA engine can handle
 * all sizes. The max width and height are arbitrary numbers for this driver.
 * Use 16K * 16K as the arbitrary limit. It is large enough that it is unlikely
 * the limit will be hit in practice.
 */
#define MAX_WIDTH_BYTES			SZ_16K
#define MAX_HEIGHT_LINES		SZ_16K

/* Media pads: one sink from the bridge, one source per DMA context. */
#define TI_CSI2RX_PAD_SINK		0
#define TI_CSI2RX_PAD_FIRST_SOURCE	1
#define TI_CSI2RX_MAX_SOURCE_PADS	TI_CSI2RX_MAX_CTX
#define TI_CSI2RX_MAX_PADS		(1 + TI_CSI2RX_MAX_SOURCE_PADS)

/* Bound on waiting for the stale-data drain transfer to complete. */
#define DRAIN_TIMEOUT_MS		50
#define DRAIN_BUFFER_SIZE		SZ_32K

/*
 * Mapping between a V4L2 pixel format and the CSI-2 bus description used to
 * program the SHIM DMA context register for that format.
 */
struct ti_csi2rx_fmt {
	u32				fourcc;	/* Four character code. */
	u32				code;	/* Mbus code. */
	u32				csi_dt;	/* CSI Data type. */
	u8				bpp;	/* Bits per pixel. */
	u8				size;	/* Data size shift when unpacking. */
};

/* Driver wrapper around a vb2 buffer, linkable into the DMA queues. */
struct ti_csi2rx_buffer {
	/* Common v4l2 buffer. Must be first. */
	struct vb2_v4l2_buffer		vb;
	struct list_head		list;	/* Node in dma.queue or dma.submitted. */
	struct ti_csi2rx_ctx		*ctx;	/* Owning DMA context. */
};

/* Lifecycle of a per-context DMA channel; guarded by ti_csi2rx_dma.lock. */
enum ti_csi2rx_dma_state {
	TI_CSI2RX_DMA_STOPPED,	/* Streaming not started yet. */
	TI_CSI2RX_DMA_IDLE,	/* Streaming but no pending DMA operation. */
	TI_CSI2RX_DMA_ACTIVE,	/* Streaming and pending DMA operation. */
};

/* Per-context DMA bookkeeping. */
struct ti_csi2rx_dma {
	/* Protects all fields in this struct. */
	spinlock_t			lock;
	struct dma_chan			*chan;
	/* Buffers queued to the driver, waiting to be processed by DMA. */
	struct list_head		queue;
	enum ti_csi2rx_dma_state	state;
	/*
	 * Queue of buffers submitted to DMA engine.
	 */
	struct list_head		submitted;
};

struct ti_csi2rx_dev;

/* One capture context: a video node, its queue and its DMA channel. */
struct ti_csi2rx_ctx {
	struct ti_csi2rx_dev		*csi;
	struct video_device		vdev;
	struct vb2_queue		vidq;
	struct mutex			mutex; /* To serialize ioctls. */
	struct v4l2_format		v_fmt;	/* Currently configured capture format. */
	struct ti_csi2rx_dma		dma;
	struct media_pad		pad;	/* Sink pad of the video node. */
	u32				sequence;	/* Frame sequence counter for vb2. */
	u32				idx;	/* Context index (SHIM register stride). */
	u32				vc;	/* CSI-2 virtual channel for this stream. */
	u32				stream;	/* Sink stream id resolved from routing. */
	u32				ok_frame_cnt;	/* DMA completions without error. */
	u32				ng_frame_cnt;	/* DMA completions with error/residue. */
	ktime_t			rx_start;	/* Time of the first frame completion. */
};

/* Device-level state shared by all capture contexts. */
struct ti_csi2rx_dev {
	struct device			*dev;
	void __iomem			*shim;	/* SHIM register base. */
	/* To serialize core subdev ioctls. */
	struct mutex			mutex;
	/* Number of streams currently enabled (managed outside this chunk). */
	unsigned int			enable_count;
	bool					enable_multi_stream;
	unsigned int			num_ctx;
	struct v4l2_async_notifier	notifier;
	struct media_device		mdev;
	struct media_pipeline		pipe;
	struct media_pad		pads[TI_CSI2RX_MAX_PADS];
	struct v4l2_device		v4l2_dev;
	struct v4l2_subdev		*source;	/* Upstream (bridge) subdev. */
	struct v4l2_subdev		subdev;	/* Our own routing subdev. */
	struct ti_csi2rx_ctx		ctx[TI_CSI2RX_MAX_CTX];
	u64				enabled_streams_mask;
	/* Buffer to drain stale data from PSI-L endpoint */
	struct {
		void			*vaddr;
		dma_addr_t		paddr;
		size_t			len;
	} drain;
};

/*
 * Supported format table. Each entry maps a V4L2 fourcc to its media bus
 * code, CSI-2 data type and SHIM unpack size.
 *
 * Fix: the original table contained two zero-filled `{ }` entries and a
 * duplicated GREY entry. A zero-filled entry has bpp == 0, so if it was ever
 * matched (e.g. find_format_by_fourcc(0)) ti_csi2rx_fill_fmt() would divide
 * by zero; the bogus entries were also exposed through VIDIOC_ENUM_FMT.
 */
static const struct ti_csi2rx_fmt ti_csi2rx_formats[] = {
	{
		/* Vendor-specific RAW8 variant carried as USER_DEFINED(0). */
		.fourcc			= V4L2_PIX_FMT_SBGGR8,
		.code			= MEDIA_BUS_FMT_SBGGR8_1X8,
		.csi_dt			= MIPI_CSI2_DT_USER_DEFINED(0),
		.bpp			= 8,
		.size			= SHIM_DMACNTX_SIZE_8,
	}, {
		.fourcc			= V4L2_PIX_FMT_YUYV,
		.code			= MEDIA_BUS_FMT_YUYV8_1X16,
		.csi_dt			= MIPI_CSI2_DT_YUV422_8B,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_8,
	}, {
		.fourcc			= V4L2_PIX_FMT_UYVY,
		.code			= MEDIA_BUS_FMT_UYVY8_1X16,
		.csi_dt			= MIPI_CSI2_DT_YUV422_8B,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_8,
	}, {
		.fourcc			= V4L2_PIX_FMT_YVYU,
		.code			= MEDIA_BUS_FMT_YVYU8_1X16,
		.csi_dt			= MIPI_CSI2_DT_YUV422_8B,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_8,
	}, {
		.fourcc			= V4L2_PIX_FMT_VYUY,
		.code			= MEDIA_BUS_FMT_VYUY8_1X16,
		.csi_dt			= MIPI_CSI2_DT_YUV422_8B,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_8,
	}, {
		.fourcc			= V4L2_PIX_FMT_SBGGR8,
		.code			= MEDIA_BUS_FMT_SBGGR8_1X8,
		.csi_dt			= MIPI_CSI2_DT_RAW8,
		.bpp			= 8,
		.size			= SHIM_DMACNTX_SIZE_8,
	}, {
		.fourcc			= V4L2_PIX_FMT_SGBRG8,
		.code			= MEDIA_BUS_FMT_SGBRG8_1X8,
		.csi_dt			= MIPI_CSI2_DT_RAW8,
		.bpp			= 8,
		.size			= SHIM_DMACNTX_SIZE_8,
	}, {
		.fourcc			= V4L2_PIX_FMT_SGRBG8,
		.code			= MEDIA_BUS_FMT_SGRBG8_1X8,
		.csi_dt			= MIPI_CSI2_DT_RAW8,
		.bpp			= 8,
		.size			= SHIM_DMACNTX_SIZE_8,
	}, {
		.fourcc			= V4L2_PIX_FMT_SRGGB8,
		.code			= MEDIA_BUS_FMT_SRGGB8_1X8,
		.csi_dt			= MIPI_CSI2_DT_RAW8,
		.bpp			= 8,
		.size			= SHIM_DMACNTX_SIZE_8,
	}, {
		.fourcc			= V4L2_PIX_FMT_GREY,
		.code			= MEDIA_BUS_FMT_Y8_1X8,
		.csi_dt			= MIPI_CSI2_DT_RAW8,
		.bpp			= 8,
		.size			= SHIM_DMACNTX_SIZE_8,
	}, {
		.fourcc			= V4L2_PIX_FMT_SBGGR10,
		.code			= MEDIA_BUS_FMT_SBGGR10_1X10,
		.csi_dt			= MIPI_CSI2_DT_RAW10,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_16,
	}, {
		.fourcc			= V4L2_PIX_FMT_SGBRG10,
		.code			= MEDIA_BUS_FMT_SGBRG10_1X10,
		.csi_dt			= MIPI_CSI2_DT_RAW10,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_16,
	}, {
		.fourcc			= V4L2_PIX_FMT_SGRBG10,
		.code			= MEDIA_BUS_FMT_SGRBG10_1X10,
		.csi_dt			= MIPI_CSI2_DT_RAW10,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_16,
	}, {
		.fourcc			= V4L2_PIX_FMT_SRGGB10,
		.code			= MEDIA_BUS_FMT_SRGGB10_1X10,
		.csi_dt			= MIPI_CSI2_DT_RAW10,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_16,
	}, {
		.fourcc			= V4L2_PIX_FMT_SBGGR12,
		.code			= MEDIA_BUS_FMT_SBGGR12_1X12,
		.csi_dt			= MIPI_CSI2_DT_RAW12,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_16,
	}, {
		.fourcc			= V4L2_PIX_FMT_SGBRG12,
		.code			= MEDIA_BUS_FMT_SGBRG12_1X12,
		.csi_dt			= MIPI_CSI2_DT_RAW12,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_16,
	}, {
		.fourcc			= V4L2_PIX_FMT_SGRBG12,
		.code			= MEDIA_BUS_FMT_SGRBG12_1X12,
		.csi_dt			= MIPI_CSI2_DT_RAW12,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_16,
	}, {
		.fourcc			= V4L2_PIX_FMT_SRGGB12,
		.code			= MEDIA_BUS_FMT_SRGGB12_1X12,
		.csi_dt			= MIPI_CSI2_DT_RAW12,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_16,
	}, {
		.fourcc			= V4L2_PIX_FMT_SRGGI10,
		.code			= MEDIA_BUS_FMT_SRGGI10_1X10,
		.csi_dt			= MIPI_CSI2_DT_RAW10,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_16,
	}, {
		.fourcc			= V4L2_PIX_FMT_SGRIG10,
		.code			= MEDIA_BUS_FMT_SGRIG10_1X10,
		.csi_dt			= MIPI_CSI2_DT_RAW10,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_16,
	}, {
		.fourcc			= V4L2_PIX_FMT_SBGGI10,
		.code			= MEDIA_BUS_FMT_SBGGI10_1X10,
		.csi_dt			= MIPI_CSI2_DT_RAW10,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_16,
	}, {
		.fourcc			= V4L2_PIX_FMT_SGBIG10,
		.code			= MEDIA_BUS_FMT_SGBIG10_1X10,
		.csi_dt			= MIPI_CSI2_DT_RAW10,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_16,
	}, {
		.fourcc			= V4L2_PIX_FMT_SGIRG10,
		.code			= MEDIA_BUS_FMT_SGIRG10_1X10,
		.csi_dt			= MIPI_CSI2_DT_RAW10,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_16,
	}, {
		.fourcc			= V4L2_PIX_FMT_SIGGR10,
		.code			= MEDIA_BUS_FMT_SIGGR10_1X10,
		.csi_dt			= MIPI_CSI2_DT_RAW10,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_16,
	}, {
		.fourcc			= V4L2_PIX_FMT_SGIBG10,
		.code			= MEDIA_BUS_FMT_SGIBG10_1X10,
		.csi_dt			= MIPI_CSI2_DT_RAW10,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_16,
	}, {
		.fourcc			= V4L2_PIX_FMT_SIGGB10,
		.code			= MEDIA_BUS_FMT_SIGGB10_1X10,
		.csi_dt			= MIPI_CSI2_DT_RAW10,
		.bpp			= 16,
		.size			= SHIM_DMACNTX_SIZE_16,
	},

	/* More formats can be supported but they are not listed for now. */
};

/* Forward declaration needed by ti_csi2rx_dma_callback. */
static int ti_csi2rx_start_dma(struct ti_csi2rx_ctx *ctx,
			       struct ti_csi2rx_buffer *buf);

/* Look up the format table entry matching a V4L2 fourcc, or NULL. */
static const struct ti_csi2rx_fmt *find_format_by_fourcc(u32 pixelformat)
{
	const struct ti_csi2rx_fmt *fmt = ti_csi2rx_formats;
	const struct ti_csi2rx_fmt *end = fmt + ARRAY_SIZE(ti_csi2rx_formats);

	for (; fmt < end; fmt++) {
		if (fmt->fourcc == pixelformat)
			return fmt;
	}

	return NULL;
}

/* Look up the format table entry matching a media bus code, or NULL. */
static const struct ti_csi2rx_fmt *find_format_by_code(u32 code)
{
	const struct ti_csi2rx_fmt *fmt = ti_csi2rx_formats;
	const struct ti_csi2rx_fmt *end = fmt + ARRAY_SIZE(ti_csi2rx_formats);

	for (; fmt < end; fmt++) {
		if (fmt->code == code)
			return fmt;
	}

	return NULL;
}

/*
 * Populate a v4l2_format from a format table entry, clamping the requested
 * geometry to what the DMA engine can transfer.
 *
 * Fix: bpl was computed but never stored anywhere, leaving bytesperline
 * whatever userspace passed in (an unused-variable warning and an
 * inconsistent format report). Set it to the exact (unaligned) line size,
 * matching this file's "allow non-16-aligned width" hack.
 */
static void ti_csi2rx_fill_fmt(const struct ti_csi2rx_fmt *csi_fmt,
			       struct v4l2_format *v4l2_fmt)
{
	struct v4l2_pix_format *pix = &v4l2_fmt->fmt.pix;
	unsigned int pixels_in_word;
	u8 bpp = csi_fmt->bpp;
	u32 bpl;

	/* Number of pixels per 16-byte PSI-L word. */
	pixels_in_word = PSIL_WORD_SIZE_BYTES * 8 / bpp;

	pix->width = clamp_t(unsigned int, pix->width,
			     pixels_in_word,
			     MAX_WIDTH_BYTES * 8 / bpp);

	// HACK: Allow non-16-aligned width
	/*pix->width = rounddown(pix->width, pixels_in_word);*/

	pix->height = clamp_t(unsigned int, pix->height, 1, MAX_HEIGHT_LINES);

	v4l2_fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	pix->pixelformat = csi_fmt->fourcc;
	pix->colorspace = V4L2_COLORSPACE_SRGB;
	pix->sizeimage = pix->height * pix->width * (bpp / 8);

	bpl = (pix->width * ALIGN(bpp, 8)) >> 3;

	// HACK: Allow non-16-aligned width (original aligned: ALIGN(bpl, 16))
	pix->bytesperline = bpl;
}

/* VIDIOC_QUERYCAP: report driver and card name (both the module name). */
static int ti_csi2rx_querycap(struct file *file, void *priv,
			      struct v4l2_capability *cap)
{
	strscpy(cap->driver, TI_CSI2RX_MODULE_NAME, sizeof(cap->driver));
	strscpy(cap->card, TI_CSI2RX_MODULE_NAME, sizeof(cap->card));

	return 0;
}

/*
 * VIDIOC_ENUM_FMT: enumerate the format table, optionally filtered by a
 * media bus code (in which case only index 0 is valid, as the mapping is
 * one-to-one).
 */
static int ti_csi2rx_enum_fmt_vid_cap(struct file *file, void *priv,
				      struct v4l2_fmtdesc *f)
{
	const struct ti_csi2rx_fmt *fmt;

	if (f->mbus_code) {
		/* 1-to-1 mapping between bus formats and pixel formats. */
		if (f->index > 0)
			return -EINVAL;
		fmt = find_format_by_code(f->mbus_code);
		if (!fmt)
			return -EINVAL;
	} else {
		if (f->index >= ARRAY_SIZE(ti_csi2rx_formats))
			return -EINVAL;
		fmt = &ti_csi2rx_formats[f->index];
	}

	f->pixelformat = fmt->fourcc;
	f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	memset(f->reserved, 0, sizeof(f->reserved));

	return 0;
}

/* VIDIOC_G_FMT: return the context's currently configured format. */
static int ti_csi2rx_g_fmt_vid_cap(struct file *file, void *priv,
				   struct v4l2_format *f)
{
	struct ti_csi2rx_ctx *ctx = video_drvdata(file);

	*f = ctx->v_fmt;
	return 0;
}

/*
 * VIDIOC_TRY_FMT: adjust the requested format to something the hardware can
 * produce, without changing driver state.
 *
 * Fix: the field assignment was mis-indented as though it were inside the
 * if (!fmt) branch; it unconditionally executes, so indent it accordingly.
 */
static int ti_csi2rx_try_fmt_vid_cap(struct file *file, void *priv,
				     struct v4l2_format *f)
{
	const struct ti_csi2rx_fmt *fmt;

	/*
	 * Default to the first format if the requested pixel format code isn't
	 * supported.
	 */
	fmt = find_format_by_fourcc(f->fmt.pix.pixelformat);
	if (!fmt)
		fmt = &ti_csi2rx_formats[0];

	/* Interlaced formats are not supported. */
	f->fmt.pix.field = V4L2_FIELD_NONE;

	ti_csi2rx_fill_fmt(fmt, f);

	return 0;
}

/* VIDIOC_S_FMT: validate via try_fmt and latch the format on the context. */
static int ti_csi2rx_s_fmt_vid_cap(struct file *file, void *priv,
				   struct v4l2_format *f)
{
	struct ti_csi2rx_ctx *ctx = video_drvdata(file);
	int ret;

	/* The format may not change while buffers are allocated. */
	if (vb2_is_busy(&ctx->vidq))
		return -EBUSY;

	ret = ti_csi2rx_try_fmt_vid_cap(file, priv, f);
	if (ret < 0)
		return ret;

	ctx->v_fmt = *f;
	return 0;
}

/*
 * VIDIOC_ENUM_FRAMESIZES: report a single stepwise range per pixel format,
 * bounded by the PSI-L word size and the arbitrary driver maxima.
 */
static int ti_csi2rx_enum_framesizes(struct file *file, void *fh,
				     struct v4l2_frmsizeenum *fsize)
{
	const struct ti_csi2rx_fmt *fmt;
	unsigned int pixels_in_word;
	u8 bpp;

	fmt = find_format_by_fourcc(fsize->pixel_format);
	if (!fmt || fsize->index != 0)
		return -EINVAL;

	bpp = ALIGN(fmt->bpp, 8);

	/*
	 * Number of pixels in one PSI-L word. The transfer happens in multiples
	 * of PSI-L word sizes.
	 */
	pixels_in_word = PSIL_WORD_SIZE_BYTES * 8 / bpp;

	fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
	fsize->stepwise.min_width = pixels_in_word;
	fsize->stepwise.max_width = rounddown(MAX_WIDTH_BYTES * 8 / bpp,
					      pixels_in_word);

	// HACK: Set step width to 1 to allow non-16 aligned transfers
	fsize->stepwise.step_width = 1;
	fsize->stepwise.min_height = 1;
	fsize->stepwise.max_height = MAX_HEIGHT_LINES;
	fsize->stepwise.step_height = 1;

	return 0;
}

/* V4L2 ioctl dispatch table; buffer ioctls go straight to vb2 helpers. */
static const struct v4l2_ioctl_ops csi_ioctl_ops = {
	.vidioc_querycap      = ti_csi2rx_querycap,
	.vidioc_enum_fmt_vid_cap = ti_csi2rx_enum_fmt_vid_cap,
	.vidioc_try_fmt_vid_cap = ti_csi2rx_try_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap = ti_csi2rx_g_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap = ti_csi2rx_s_fmt_vid_cap,
	.vidioc_enum_framesizes = ti_csi2rx_enum_framesizes,
	.vidioc_reqbufs       = vb2_ioctl_reqbufs,
	.vidioc_create_bufs   = vb2_ioctl_create_bufs,
	.vidioc_prepare_buf   = vb2_ioctl_prepare_buf,
	.vidioc_querybuf      = vb2_ioctl_querybuf,
	.vidioc_qbuf          = vb2_ioctl_qbuf,
	.vidioc_dqbuf         = vb2_ioctl_dqbuf,
	.vidioc_expbuf        = vb2_ioctl_expbuf,
	.vidioc_streamon      = vb2_ioctl_streamon,
	.vidioc_streamoff     = vb2_ioctl_streamoff,
};

/* File operations; everything beyond open is handled by vb2/v4l2 helpers. */
static const struct v4l2_file_operations csi_fops = {
	.owner = THIS_MODULE,
	.open = v4l2_fh_open,
	.release = vb2_fop_release,
	.read = vb2_fop_read,
	.poll = vb2_fop_poll,
	.unlocked_ioctl = video_ioctl2,
	.mmap = vb2_fop_mmap,
};

/* Async notifier: remember the bound source subdev; links are made later. */
static int csi_async_notifier_bound(struct v4l2_async_notifier *notifier,
				    struct v4l2_subdev *subdev,
				    struct v4l2_async_connection *asc)
{
	struct ti_csi2rx_dev *csi = dev_get_drvdata(notifier->v4l2_dev->dev);

	csi->source = subdev;
	return 0;
}

/*
 * Async notifier completion: link the source to our sink pad, register one
 * video node per DMA context and link each to its source pad.
 *
 * Fix: normalized the broken indentation (comment and `if (ret)` at column
 * 0) and simplified the error unwind to the idiomatic `while (i--)`, which
 * unregisters exactly the video devices registered before the failure.
 */
static int csi_async_notifier_complete(struct v4l2_async_notifier *notifier)
{
	struct ti_csi2rx_dev *csi = dev_get_drvdata(notifier->v4l2_dev->dev);
	int ret, i;

	/* Create link from source to subdev */
	ret = v4l2_create_fwnode_links_to_pad(csi->source,
					      &csi->pads[TI_CSI2RX_PAD_SINK],
					      MEDIA_LNK_FL_IMMUTABLE |
					      MEDIA_LNK_FL_ENABLED);
	if (ret)
		return ret;

	/* Create and link video nodes for all DMA contexts */
	for (i = 0; i < csi->num_ctx; i++) {
		struct ti_csi2rx_ctx *ctx = &csi->ctx[i];
		struct video_device *vdev = &ctx->vdev;

		ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
		if (ret)
			goto unregister_dev;

		ret = media_create_pad_link(&csi->subdev.entity,
					    TI_CSI2RX_PAD_FIRST_SOURCE + ctx->idx,
					    &vdev->entity, 0,
					    MEDIA_LNK_FL_IMMUTABLE |
					    MEDIA_LNK_FL_ENABLED);
		if (ret) {
			video_unregister_device(vdev);
			goto unregister_dev;
		}
	}

	ret = v4l2_device_register_subdev_nodes(&csi->v4l2_dev);
	if (ret)
		goto unregister_dev;

	return 0;

unregister_dev:
	while (i--)
		video_unregister_device(&csi->ctx[i].vdev);
	return ret;
}

/* Async notifier callbacks: bind the source, then wire up the topology. */
static const struct v4l2_async_notifier_operations csi_async_notifier_ops = {
	.bound = csi_async_notifier_bound,
	.complete = csi_async_notifier_complete,
};

/*
 * Register an async notifier for the "csi-bridge" child node so the driver
 * binds to the upstream CSI-2 bridge when it probes.
 */
static int ti_csi2rx_notifier_register(struct ti_csi2rx_dev *csi)
{
	struct v4l2_async_connection *asc;
	struct fwnode_handle *fwnode;
	struct device_node *node;
	int ret;

	node = of_get_child_by_name(csi->dev->of_node, "csi-bridge");
	if (!node)
		return -EINVAL;

	fwnode = of_fwnode_handle(node);
	if (!fwnode) {
		of_node_put(node);
		return -EINVAL;
	}

	v4l2_async_nf_init(&csi->notifier, &csi->v4l2_dev);
	csi->notifier.ops = &csi_async_notifier_ops;

	asc = v4l2_async_nf_add_fwnode(&csi->notifier, fwnode,
				       struct v4l2_async_connection);
	of_node_put(node);
	if (IS_ERR(asc)) {
		ret = PTR_ERR(asc);
		goto err_cleanup;
	}

	ret = v4l2_async_nf_register(&csi->notifier);
	if (ret)
		goto err_cleanup;

	return 0;

err_cleanup:
	v4l2_async_nf_cleanup(&csi->notifier);
	return ret;
}

/*
 * Program the SHIM DMA context register for this context's current format
 * and virtual channel, and de-assert the module pixel reset if this is the
 * first enabled stream.
 *
 * Fix: the find_format_by_fourcc() result was dereferenced without a NULL
 * check; guard it so a format table/state mismatch cannot oops.
 */
static void ti_csi2rx_setup_shim(struct ti_csi2rx_ctx *ctx)
{
	struct ti_csi2rx_dev *csi = ctx->csi;
	const struct ti_csi2rx_fmt *fmt;
	unsigned int reg;

	fmt = find_format_by_fourcc(ctx->v_fmt.fmt.pix.pixelformat);
	if (WARN_ON(!fmt))
		return;

	/* De-assert the pixel interface reset. */
	if (!csi->enable_count) {
		reg = SHIM_CNTL_PIX_RST;
		writel(reg, csi->shim + SHIM_CNTL);
	}

	reg = SHIM_DMACNTX_EN;
	reg |= FIELD_PREP(SHIM_DMACNTX_FMT, fmt->csi_dt);

	/*
	 * The hardware assumes incoming YUV422 8-bit data on MIPI CSI2 bus
	 * follows the spec and is packed in the order U0 -> Y0 -> V0 -> Y1 ->
	 * ...
	 *
	 * There is an option to swap the bytes around before storing in
	 * memory, to achieve different pixel formats:
	 *
	 * Byte3 <----------- Byte0
	 * [ Y1 ][ V0 ][ Y0 ][ U0 ]	MODE 11
	 * [ Y1 ][ U0 ][ Y0 ][ V0 ]	MODE 10
	 * [ V0 ][ Y1 ][ U0 ][ Y0 ]	MODE 01
	 * [ U0 ][ Y1 ][ V0 ][ Y0 ]	MODE 00
	 *
	 * We don't have any requirement to change pixelformat from what is
	 * coming from the source, so we keep it in MODE 11, which does not
	 * swap any bytes when storing in memory.
	 */
	switch (fmt->fourcc) {
	case V4L2_PIX_FMT_UYVY:
	case V4L2_PIX_FMT_VYUY:
	case V4L2_PIX_FMT_YUYV:
	case V4L2_PIX_FMT_YVYU:
		reg |= FIELD_PREP(SHIM_DMACNTX_YUV422,
					SHIM_DMACNTX_YUV422_MODE_11);
		break;
	default:
		/* Ignore if not YUV 4:2:2 */
		break;
	}

	reg |= FIELD_PREP(SHIM_DMACNTX_SIZE, fmt->size);
	reg |= FIELD_PREP(SHIM_DMACNTX_VC, ctx->vc);

	writel(reg, csi->shim + SHIM_DMACNTX(ctx->idx));

	reg = FIELD_PREP(SHIM_PSI_CFG0_SRC_TAG, 0) |
	      FIELD_PREP(SHIM_PSI_CFG0_DST_TAG, 0);
	writel(reg, csi->shim + SHIM_PSI_CFG0(ctx->idx));
}

/* DMA completion callback: wake the waiter in ti_csi2rx_drain_dma(). */
static void ti_csi2rx_drain_callback(void *param)
{
	complete((struct completion *)param);
}

/*
 * Drain the stale data left at the PSI-L endpoint.
 *
 * This might happen if no buffers are queued in time but source is still
 * streaming. In multi-stream scenarios this can happen when one stream is
 * stopped but other is still streaming, and thus module-level pixel reset is
 * not asserted.
 *
 * To prevent that stale data corrupting the subsequent transactions, it is
 * required to issue DMA requests to drain it out.
 */
static int ti_csi2rx_drain_dma(struct ti_csi2rx_ctx *ctx)
{
	struct ti_csi2rx_dev *csi = ctx->csi;
	struct dma_async_tx_descriptor *desc;
	struct completion drain_complete;
	dma_cookie_t cookie;
	int ret;

	init_completion(&drain_complete);

	desc = dmaengine_prep_slave_single(ctx->dma.chan, csi->drain.paddr,
					   csi->drain.len, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -EIO;
		goto out;
	}

	desc->callback = ti_csi2rx_drain_callback;
	desc->callback_param = &drain_complete;

	cookie = dmaengine_submit(desc);
	ret = dma_submit_error(cookie);
	if (ret)
		goto out;

	dma_async_issue_pending(ctx->dma.chan);

	if (!wait_for_completion_timeout(&drain_complete,
					 msecs_to_jiffies(DRAIN_TIMEOUT_MS))) {
		dmaengine_terminate_sync(ctx->dma.chan);
		dev_dbg(csi->dev, "DMA transfer timed out for drain buffer\n");
		ret = -ETIMEDOUT;
		goto out;
	}
out:
	return ret;
}

/*
 * Move every buffer waiting on dma->queue onto the hardware, relinking it
 * to dma->submitted. Caller must hold dma->lock.
 *
 * Fix: on a start-DMA failure the buffer was completed with
 * vb2_buffer_done() but left on dma->queue, so ti_csi2rx_cleanup_buffers()
 * would complete the same vb2 buffer a second time. Unlink it first.
 */
static int ti_csi2rx_dma_submit_pending(struct ti_csi2rx_ctx *ctx)
{
	struct ti_csi2rx_dma *dma = &ctx->dma;
	struct ti_csi2rx_buffer *buf;
	int ret = 0;

	/* If there are more buffers to process then start their transfer. */
	while (!list_empty(&dma->queue)) {
		buf = list_first_entry(&dma->queue, struct ti_csi2rx_buffer,
				       list);
		ret = ti_csi2rx_start_dma(ctx, buf);
		if (ret) {
			dev_err(ctx->csi->dev,
				"Failed to queue the next buffer for DMA\n");
			list_del(&buf->list);
			vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
			break;
		}
		list_move_tail(&buf->list, &dma->submitted);
	}
	return ret;
}

/*
 * Per-buffer DMA completion callback. Timestamps the frame, classifies it
 * as good/bad via the dmaengine result, hands it back to vb2 and submits
 * the next pending buffer.
 *
 * Fix: the error condition used bitwise '&' between boolean sub-expressions
 * (same truth table here, but '&&' expresses the intended short-circuit
 * logic and silences -Wbitwise-instead-of-logical).
 */
static void ti_csi2rx_dma_callback(void *param,
				   const struct dmaengine_result *result)
{
	struct ti_csi2rx_buffer *buf = param;
	struct ti_csi2rx_ctx *ctx = buf->ctx;
	struct ti_csi2rx_dma *dma = &ctx->dma;
	unsigned long flags = 0;

	/*
	 * TODO: Derive the sequence number from the CSI2RX frame number
	 * hardware monitor registers.
	 */
	buf->vb.vb2_buf.timestamp = ktime_get_ns();
	buf->vb.sequence = ctx->sequence++;

	spin_lock_irqsave(&dma->lock, flags);

	/* Completions must arrive in submission order. */
	WARN_ON(!list_is_first(&buf->list, &dma->submitted));

	/* Record when the very first frame (good or bad) completed. */
	if (0 == (ctx->ok_frame_cnt + ctx->ng_frame_cnt))
		ctx->rx_start = ktime_get();

	/*
	 * NOTE(review): the raw value 5 is not a member of enum
	 * dmaengine_tx_result; presumably a vendor-specific completion code
	 * treated as success -- confirm against the UDMA driver.
	 * Error logging is deliberately disabled: per Qingfeng's testing it
	 * could wedge the system until reboot.
	 */
	if (result && result->result != 5 &&
	    (result->result != DMA_TRANS_NOERROR || result->residue != 0)) {
		ctx->ng_frame_cnt++;
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
	} else {
		ctx->ok_frame_cnt++;
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
	}
	list_del(&buf->list);

	ti_csi2rx_dma_submit_pending(ctx);

	if (list_empty(&dma->submitted))
		dma->state = TI_CSI2RX_DMA_IDLE;

	spin_unlock_irqrestore(&dma->lock, flags);
}

/*
 * Submit one buffer to the DMA engine using its scatter-gather list and
 * kick the channel. Returns 0 on success or a negative error.
 *
 * Fix: removed the unused locals `addr` and `len` (leftovers from the old
 * dma-contig path, causing unused-variable warnings) together with the dead
 * commented-out dmaengine_prep_slave_single() code.
 */
static int ti_csi2rx_start_dma(struct ti_csi2rx_ctx *ctx,
			       struct ti_csi2rx_buffer *buf)
{
	struct dma_async_tx_descriptor *desc;
	struct sg_table *sg;
	dma_cookie_t cookie;
	int ret;

	/* Buffers are dma-sg backed, so hand the whole SG list to DMA. */
	sg = vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0);
	desc = dmaengine_prep_slave_sg(ctx->dma.chan, sg->sgl, sg->nents,
				       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT |
				       DMA_CTRL_ACK);
	if (!desc)
		return -EIO;

	desc->callback_result = ti_csi2rx_dma_callback;
	desc->callback_param = buf;

	cookie = dmaengine_submit(desc);
	ret = dma_submit_error(cookie);
	if (ret)
		return ret;

	dma_async_issue_pending(ctx->dma.chan);

	return 0;
}

static void ti_csi2rx_stop_dma(struct ti_csi2rx_ctx *ctx)
{
	struct ti_csi2rx_dma *dma = &ctx->dma;
	enum ti_csi2rx_dma_state state;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma->lock, flags);
	state = ctx->dma.state;
	dma->state = TI_CSI2RX_DMA_STOPPED;
	spin_unlock_irqrestore(&dma->lock, flags);

	if (state != TI_CSI2RX_DMA_STOPPED) {
		/*
		 * Normal DMA termination does not clean up pending data on
		 * the endpoint if multiple streams are running and only one
		 * is stopped, as the module-level pixel reset cannot be
		 * enforced before terminating DMA.
		 */
		ret = ti_csi2rx_drain_dma(ctx);
		if (ret && ret != -ETIMEDOUT)
			dev_warn(ctx->csi->dev,
				 "Failed to drain DMA. Next frame might be bogus\n");
	}

	ret = dmaengine_terminate_sync(ctx->dma.chan);
	if (ret)
		dev_err(ctx->csi->dev, "Failed to stop DMA: %d\n", ret);
}

/*
 * Return every buffer on both DMA lists to vb2 in the given state, under
 * the DMA lock. (Fix: normalized the mangled indentation of the original.)
 */
static void ti_csi2rx_cleanup_buffers(struct ti_csi2rx_ctx *ctx,
				      enum vb2_buffer_state state)
{
	struct ti_csi2rx_dma *dma = &ctx->dma;
	struct ti_csi2rx_buffer *buf, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&dma->lock, flags);
	list_for_each_entry_safe(buf, tmp, &dma->queue, list) {
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb.vb2_buf, state);
	}
	list_for_each_entry_safe(buf, tmp, &dma->submitted, list) {
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb.vb2_buf, state);
	}
	spin_unlock_irqrestore(&dma->lock, flags);
}

/*
 * vb2 queue_setup: single plane, at least sizeimage bytes. A caller-supplied
 * plane size is honoured as long as one full frame fits.
 */
static int ti_csi2rx_queue_setup(struct vb2_queue *q, unsigned int *nbuffers,
				 unsigned int *nplanes, unsigned int sizes[],
				 struct device *alloc_devs[])
{
	struct ti_csi2rx_ctx *ctx = vb2_get_drv_priv(q);
	unsigned int frame_size = ctx->v_fmt.fmt.pix.sizeimage;

	if (*nplanes) {
		if (sizes[0] < frame_size)
			return -EINVAL;
	} else {
		sizes[0] = frame_size;
	}

	*nplanes = 1;
	return 0;
}

/* vb2 buf_prepare: reject planes too small for one frame, set the payload. */
static int ti_csi2rx_buffer_prepare(struct vb2_buffer *vb)
{
	struct ti_csi2rx_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	unsigned long needed = ctx->v_fmt.fmt.pix.sizeimage;

	if (vb2_plane_size(vb, 0) < needed) {
		dev_err(ctx->csi->dev, "Data will not fit into plane\n");
		return -EINVAL;
	}

	vb2_set_plane_payload(vb, 0, needed);
	return 0;
}

/*
 * vb2 buf_queue: hand a freshly queued buffer to the DMA machinery. If DMA
 * is idle (stalled for lack of buffers), restart it directly — after
 * draining stale endpoint data — otherwise just append to the pending queue
 * and let the DMA completion callback pick it up.
 */
static void ti_csi2rx_buffer_queue(struct vb2_buffer *vb)
{
	struct ti_csi2rx_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct ti_csi2rx_buffer *buf;
	struct ti_csi2rx_dma *dma = &ctx->dma;
	bool restart_dma = false;
	unsigned long flags = 0;
	int ret;

	buf = container_of(vb, struct ti_csi2rx_buffer, vb.vb2_buf);
	buf->ctx = ctx;

	spin_lock_irqsave(&dma->lock, flags);
	/*
	 * Usually the DMA callback takes care of queueing the pending buffers.
	 * But if DMA has stalled due to lack of buffers, restart it now.
	 */
	if (dma->state == TI_CSI2RX_DMA_IDLE) {
		/*
		 * Do not restart DMA with the lock held because
		 * ti_csi2rx_drain_dma() might block for completion.
		 * There won't be a race on queueing DMA anyway since the
		 * callback is not being fired.
		 */
		restart_dma = true;
		dma->state = TI_CSI2RX_DMA_ACTIVE;
	} else {
		list_add_tail(&buf->list, &dma->queue);
	}
	spin_unlock_irqrestore(&dma->lock, flags);

	if (restart_dma) {
		/*
		 * Once frames start dropping, some data gets stuck in the DMA
		 * pipeline somewhere. So the first DMA transfer after frame
		 * drops gives a partial frame. This is obviously not useful to
		 * the application and will only confuse it. Issue a DMA
		 * transaction to drain that up.
		 */
		ret = ti_csi2rx_drain_dma(ctx);
		if (ret && ret != -ETIMEDOUT)
			dev_warn(ctx->csi->dev,
				 "Failed to drain DMA. Next frame might be bogus\n");

		/* Re-take the lock only to submit; drain already finished. */
		spin_lock_irqsave(&dma->lock, flags);
		ret = ti_csi2rx_start_dma(ctx, buf);
		if (ret) {
			vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
			dma->state = TI_CSI2RX_DMA_IDLE;
			spin_unlock_irqrestore(&dma->lock, flags);
			dev_err(ctx->csi->dev, "Failed to start DMA: %d\n", ret);
		} else {
			list_add_tail(&buf->list, &dma->submitted);
			spin_unlock_irqrestore(&dma->lock, flags);
		}
	}
}

/*
 * Ask the connected source which CSI-2 virtual channel carries this
 * context's stream. Returns the VC number or a negative error.
 */
static int ti_csi2rx_get_vc(struct ti_csi2rx_ctx *ctx)
{
	struct ti_csi2rx_dev *csi = ctx->csi;
	struct v4l2_mbus_frame_desc fd;
	struct media_pad *pad;
	int ret, i;

	pad = media_entity_remote_pad_unique(&csi->subdev.entity,
					     MEDIA_PAD_FL_SOURCE);
	if (!pad)
		return -ENODEV;

	ret = v4l2_subdev_call(csi->source, pad, get_frame_desc, pad->index,
			       &fd);
	if (ret)
		return ret;

	if (fd.type != V4L2_MBUS_FRAME_DESC_TYPE_CSI2)
		return -EINVAL;

	/* Match our stream id against the source's frame descriptor. */
	for (i = 0; i < fd.num_entries; i++) {
		if (fd.entry[i].stream == ctx->stream)
			return fd.entry[i].bus.csi2.vc;
	}

	return -ENODEV;
}

static int ti_csi2rx_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct ti_csi2rx_ctx *ctx = vb2_get_drv_priv(vq);
	struct ti_csi2rx_dev *csi = ctx->csi;
	struct ti_csi2rx_dma *dma = &ctx->dma;
	struct v4l2_subdev_krouting *routing;
	struct v4l2_subdev_route *route = NULL;
	struct media_pad *remote_pad;
	unsigned long flags;
	int ret = 0, i;
	struct v4l2_subdev_state *state;

	ret = pm_runtime_resume_and_get(csi->dev);
	if (ret)
		return ret;

	spin_lock_irqsave(&dma->lock, flags);
	if (list_empty(&dma->queue))
		ret = -EIO;
	spin_unlock_irqrestore(&dma->lock, flags);
	if (ret)
		return ret;

	ret = video_device_pipeline_start(&ctx->vdev, &csi->pipe);
	if (ret)
		goto err;

	remote_pad = media_entity_remote_source_pad_unique(ctx->pad.entity);
	if (!remote_pad) {
		ret = -ENODEV;
		goto err;
	}

	state = v4l2_subdev_lock_and_get_active_state(&csi->subdev);

	routing = &state->routing;

	/* Find the stream to process. */
	for (i = 0; i < routing->num_routes; i++) {
		struct v4l2_subdev_route *r = &routing->routes[i];

		if (!(r->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE))
			continue;

		if (r->source_pad != remote_pad->index)
			continue;

		route = r;
		break;
	}

	if (!route) {
		ret = -ENODEV;
		v4l2_subdev_unlock_state(state);
		goto err;
	}

	ctx->stream = route->sink_stream;

	v4l2_subdev_unlock_state(state);

	ret = ti_csi2rx_get_vc(ctx);
	if (ret == -ENOIOCTLCMD)
		ctx->vc = 0;
	else if (ret < 0)
		goto err;
	else
		ctx->vc = ret;

	ti_csi2rx_setup_shim(ctx);

	ctx->sequence = 0;

	spin_lock_irqsave(&dma->lock, flags);

	ret = ti_csi2rx_dma_submit_pending(ctx);
	if (ret) {
		spin_unlock_irqrestore(&dma->lock, flags);
				goto err_dma;
	}

	dma->state = TI_CSI2RX_DMA_ACTIVE;
	spin_unlock_irqrestore(&dma->lock, flags);

	ret = v4l2_subdev_enable_streams(&csi->subdev,
					 TI_CSI2RX_PAD_FIRST_SOURCE + ctx->idx,
					 BIT(0));
	if (ret)
		goto err_dma;

	return 0;

err_dma:
    ti_csi2rx_stop_dma(ctx);
	video_device_pipeline_stop(&ctx->vdev);
	writel(0, csi->shim + SHIM_CNTL);
	writel(0, csi->shim + SHIM_DMACNTX(ctx->idx));
err:
	ti_csi2rx_cleanup_buffers(ctx, VB2_BUF_STATE_QUEUED);
	pm_runtime_put(csi->dev);

	return ret;
}

/*
 * vb2 stop_streaming: quiesce the hardware, stop the subdev stream, tear
 * down DMA and return all buffers to vb2 as errored.
 */
static void ti_csi2rx_stop_streaming(struct vb2_queue *vq)
{
	struct ti_csi2rx_ctx *ctx = vb2_get_drv_priv(vq);
	struct ti_csi2rx_dev *csi = ctx->csi;
	int ret;

	/* assert pixel reset to prevent stale data on stopping last stream */
	if (csi->enable_count == 1)
		writel(0, csi->shim + SHIM_CNTL);

	/* Disable this context's SHIM DMA before stopping the source. */
	video_device_pipeline_stop(&ctx->vdev);
	writel(0, csi->shim + SHIM_DMACNTX(ctx->idx));

	ret = v4l2_subdev_disable_streams(&csi->subdev,
					  TI_CSI2RX_PAD_FIRST_SOURCE + ctx->idx,
					  BIT(0));

	if (ret)
		dev_err(csi->dev, "Failed to stop subdev stream\n");

	ti_csi2rx_stop_dma(ctx);
	ti_csi2rx_cleanup_buffers(ctx, VB2_BUF_STATE_ERROR);
	pm_runtime_put(csi->dev);
}

/* videobuf2 queue operations for every capture context. */
static const struct vb2_ops csi_vb2_qops = {
	.queue_setup = ti_csi2rx_queue_setup,
	.buf_prepare = ti_csi2rx_buffer_prepare,
	.buf_queue = ti_csi2rx_buffer_queue,
	.start_streaming = ti_csi2rx_start_streaming,
	.stop_streaming = ti_csi2rx_stop_streaming,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
};

/* Recover the device struct from its embedded routing subdev. */
static inline struct ti_csi2rx_dev *to_csi2rx_dev(struct v4l2_subdev *sd)
{
	return container_of(sd, struct ti_csi2rx_dev, subdev);
}

/*
 * Subdev set_fmt: the sink format may be set (validated against the format
 * table) and is propagated to the routed source stream; source pads are
 * read-only since the device does no transcoding.
 */
static int ti_csi2rx_sd_set_fmt(struct v4l2_subdev *sd,
				struct v4l2_subdev_state *state,
				struct v4l2_subdev_format *format)
{
	struct v4l2_mbus_framefmt *fmt;

	/* No transcoding, don't allow setting source fmt */
	if (format->pad >= TI_CSI2RX_PAD_FIRST_SOURCE)
		return v4l2_subdev_get_fmt(sd, state, format);

	if (!find_format_by_code(format->format.code))
		format->format.code = ti_csi2rx_formats[0].code;

	format->format.field = V4L2_FIELD_NONE;

	fmt = v4l2_subdev_state_get_stream_format(state, format->pad,
						  format->stream);
	if (!fmt)
		return -EINVAL;
	*fmt = format->format;

	/* Mirror the sink format onto the opposite (source) stream. */
	fmt = v4l2_subdev_state_get_opposite_stream_format(state, format->pad,
							   format->stream);
	if (!fmt)
		return -EINVAL;
	*fmt = format->format;

	return 0;
}

static int _ti_csi2rx_sd_set_routing(struct v4l2_subdev *sd,
				     struct v4l2_subdev_state *state,
				     struct v4l2_subdev_krouting *routing)
{
	int ret;

	const struct v4l2_mbus_framefmt format = {
		.width = 640,
		.height = 480,
		.code = MEDIA_BUS_FMT_UYVY8_1X16,
		.field = V4L2_FIELD_NONE,
		.colorspace = V4L2_COLORSPACE_SRGB,
		.ycbcr_enc = V4L2_YCBCR_ENC_601,
		.quantization = V4L2_QUANTIZATION_LIM_RANGE,
		.xfer_func = V4L2_XFER_FUNC_SRGB,
	};

	ret = v4l2_subdev_routing_validate(sd, routing,
					   V4L2_SUBDEV_ROUTING_ONLY_1_TO_1 |
					   V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING);

	if (ret)
		return ret;

	ret = v4l2_subdev_set_routing_with_fmt(sd, state, routing, &format);

	return ret;
}

/*
 * .set_routing pad op. The `which` argument is ignored: the state passed in
 * already corresponds to the TRY or ACTIVE routing being modified.
 */
static int ti_csi2rx_sd_set_routing(struct v4l2_subdev *sd,
				    struct v4l2_subdev_state *state,
				    enum v4l2_subdev_format_whence which,
				    struct v4l2_subdev_krouting *routing)
{
	return _ti_csi2rx_sd_set_routing(sd, state, routing);
}

static int ti_csi2rx_sd_init_cfg(struct v4l2_subdev *sd,
				 struct v4l2_subdev_state *state)
{
	struct v4l2_subdev_route routes[] = { {
		.sink_pad = 0,
		.sink_stream = 0,
		.source_pad = TI_CSI2RX_PAD_FIRST_SOURCE,
		.source_stream = 0,
		.flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE,
	} };

	struct v4l2_subdev_krouting routing = {
		.num_routes = 1,
		.routes = routes,
	};

	/* Initialize routing to single route to the fist source pad */
	return _ti_csi2rx_sd_set_routing(sd, state, &routing);
}

static int ti_csi2rx_sd_all_sink_streams(struct v4l2_subdev_state *state)
{
	struct v4l2_subdev_krouting *routing = &state->routing;
	u64 sink_streams = 0;
	int i;

	for (i = 0; i < routing->num_routes; i++) {
		struct v4l2_subdev_route *r = &routing->routes[i];

		if (r->sink_pad == TI_CSI2RX_PAD_SINK)
			sink_streams |= BIT(r->sink_stream);
	}

	return sink_streams;
}

/*
 * .enable_streams pad op: propagate stream enable to the remote source
 * subdev (the CSI bridge).
 *
 * Two modes, selected by the ADTOF_ENABLE_STREAM_NUM ioctl:
 *  - multi-stream: each call enables exactly one sink stream,
 *    BIT(enable_count). NOTE(review): this ignores @pad/@streams_mask and
 *    assumes contexts are started strictly in order 0,1,2,... — confirm
 *    userspace honours that ordering.
 *  - legacy: the first caller enables every routed sink stream at once;
 *    subsequent callers only bump the refcount.
 *
 * enable_count and enabled_streams_mask are protected by csi->mutex.
 */
static int ti_csi2rx_sd_enable_streams(struct v4l2_subdev *sd,
				       struct v4l2_subdev_state *state,
				       u32 pad, u64 streams_mask)
{
	struct ti_csi2rx_dev *csi = to_csi2rx_dev(sd);
	struct media_pad *remote_pad;
	int ret = 0;

	/* There is a single immutable link into our sink pad. */
	remote_pad = media_entity_remote_source_pad_unique(&csi->subdev.entity);
	if (!remote_pad)
		return -ENODEV;

	mutex_lock(&csi->mutex);

	if (csi->enable_multi_stream) {
		dev_dbg(csi->dev, "Enabling pad: %d, streams_mask: %lld.\n", pad, streams_mask);
		ret = v4l2_subdev_enable_streams(csi->source, remote_pad->index, BIT(csi->enable_count));
		if (ret)
			goto out;

		csi->enabled_streams_mask |= BIT(csi->enable_count);
	} else {
		if (!csi->enable_count) {
			u64 sink_streams;
	
			sink_streams = ti_csi2rx_sd_all_sink_streams(state);
			dev_dbg(csi->dev, "Enabling all streams (%llx) on sink.\n",
				sink_streams);
			ret = v4l2_subdev_enable_streams(csi->source, remote_pad->index,
							 sink_streams);
			if (ret)
				goto out;
			csi->enabled_streams_mask = sink_streams;
		}
	}

	/* Refcount of contexts currently streaming. */
	csi->enable_count++;
out:
	mutex_unlock(&csi->mutex);
	return ret;
}

/*
 * .disable_streams pad op: mirror of ti_csi2rx_sd_enable_streams.
 *
 * In multi-stream mode each call disables BIT(enable_count - 1), i.e. the
 * most recently enabled stream. NOTE(review): this assumes streams are torn
 * down in strict LIFO order relative to how they were enabled — confirm
 * userspace honours that. The multi-stream flag auto-clears when the last
 * stream is disabled. In legacy mode only the last caller disables all
 * routed sink streams.
 */
static int ti_csi2rx_sd_disable_streams(struct v4l2_subdev *sd,
					struct v4l2_subdev_state *state,
					u32 pad, u64 streams_mask)
{
	struct ti_csi2rx_dev *csi = to_csi2rx_dev(sd);
	struct media_pad *remote_pad;
	int ret = 0;

	remote_pad = media_entity_remote_source_pad_unique(&csi->subdev.entity);
	if (!remote_pad)
		return -ENODEV;

	mutex_lock(&csi->mutex);
	/* Unbalanced disable: nothing is streaming. */
	if (csi->enable_count == 0) {
		ret = -EINVAL;
		goto out;
	}

	if (csi->enable_multi_stream) {
		ret = v4l2_subdev_disable_streams(csi->source, remote_pad->index, BIT(csi->enable_count - 1));
		if (ret)
			goto out;

		csi->enabled_streams_mask &= ~(BIT(csi->enable_count - 1));
		/* Last stream going away: drop back to legacy mode. */
		if (csi->enable_count == 1) {
			csi->enable_multi_stream = false;
		}
	} else {
		if (csi->enable_count == 1) {
			u64 sink_streams;

			sink_streams = ti_csi2rx_sd_all_sink_streams(state);
			dev_dbg(csi->dev, "Disabling all streams (%llx) on sink.\n",
				sink_streams);
			ret = v4l2_subdev_disable_streams(csi->source, remote_pad->index,
							  sink_streams);
			if (ret)
				goto out;
			csi->enabled_streams_mask = 0;
		}
	}

	--csi->enable_count;
out:
	mutex_unlock(&csi->mutex);
	return ret;
}

static long ti_csi2rx_sd_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
{
	struct ti_csi2rx_dev *csi = to_csi2rx_dev(sd);
	int ret = 0;

	switch (cmd) {
	case ADTOF_ENABLE_STREAM_NUM:
		mutex_lock(&csi->mutex);
		if (*((u32 *)arg) > 0)
			csi->enable_multi_stream = true;

		dev_err(csi->dev, "enable multi stream: %u from %d.\n", csi->enable_multi_stream, *((u32 *)arg));
		mutex_unlock(&csi->mutex);
		break;

	default:
		return -ENOIOCTLCMD;
	}

	return ret;
}

/* Core subdev ops: only the private multi-stream ioctl is exposed. */
static const struct v4l2_subdev_core_ops ti_csi2rx_subdev_core_ops = {
	.ioctl = ti_csi2rx_sd_ioctl,
};

/* Pad-level subdev ops: routing, format and per-stream enable/disable. */
static const struct v4l2_subdev_pad_ops ti_csi2rx_subdev_pad_ops = {
	.init_cfg = ti_csi2rx_sd_init_cfg,
	.set_routing = ti_csi2rx_sd_set_routing,
	.get_fmt = v4l2_subdev_get_fmt,
	.set_fmt = ti_csi2rx_sd_set_fmt,
	.enable_streams = ti_csi2rx_sd_enable_streams,
	.disable_streams = ti_csi2rx_sd_disable_streams,
};

/* Top-level subdev ops table registered with the V4L2 core. */
static const struct v4l2_subdev_ops ti_csi2rx_subdev_ops = {
	.core = &ti_csi2rx_subdev_core_ops,
	.pad = &ti_csi2rx_subdev_pad_ops,
};

/* Release the context's DMA channel (counterpart of ti_csi2rx_init_dma). */
static void ti_csi2rx_cleanup_dma(struct ti_csi2rx_ctx *ctx)
{
	dma_release_channel(ctx->dma.chan);
}

/*
 * Tear down the V4L2/media registration in reverse order of
 * ti_csi2rx_v4l2_init. Order matters: unregister before final cleanup.
 */
static void ti_csi2rx_cleanup_v4l2(struct ti_csi2rx_dev *csi)
{
	v4l2_subdev_cleanup(&csi->subdev);
	media_device_unregister(&csi->mdev);
	v4l2_device_unregister(&csi->v4l2_dev);
	media_device_cleanup(&csi->mdev);
}

/* Unregister and free the async notifier used to bind the source subdev. */
static void ti_csi2rx_cleanup_notifier(struct ti_csi2rx_dev *csi)
{
	v4l2_async_nf_unregister(&csi->notifier);
	v4l2_async_nf_cleanup(&csi->notifier);
}

/* Release the context's videobuf2 queue. */
static void ti_csi2rx_cleanup_vb2q(struct ti_csi2rx_ctx *ctx)
{
	vb2_queue_release(&ctx->vidq);
}

/*
 * Tear down one capture context. The DMA channel is only held while the
 * device is runtime-active (runtime suspend releases it, resume re-acquires
 * it), so skip the release when already runtime-suspended to avoid a double
 * free of the channel.
 */
static void ti_csi2rx_cleanup_ctx(struct ti_csi2rx_ctx *ctx)
{
	if (!pm_runtime_status_suspended(ctx->csi->dev))
		ti_csi2rx_cleanup_dma(ctx);

	ti_csi2rx_cleanup_vb2q(ctx);

	video_unregister_device(&ctx->vdev);

	mutex_destroy(&ctx->mutex);
}

/*
 * Initialize the context's videobuf2 capture queue.
 *
 * Uses the scatter-gather DMA memops (vb2_dma_sg_memops) with USERPTR
 * support enabled, so userspace-allocated buffers can be mapped for DMA.
 * The queue's DMA device is the engine behind the context's channel, not
 * the platform device, so mappings are made against the correct IOMMU.
 *
 * Fix: removed the stale commented-out dma-contig code left over from the
 * switch to dma-sg.
 */
static int ti_csi2rx_init_vb2q(struct ti_csi2rx_ctx *ctx)
{
	struct vb2_queue *q = &ctx->vidq;
	int ret;

	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_USERPTR;
	q->drv_priv = ctx;
	q->buf_struct_size = sizeof(struct ti_csi2rx_buffer);
	q->ops = &csi_vb2_qops;
	q->mem_ops = &vb2_dma_sg_memops;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->dev = dmaengine_get_dma_device(ctx->dma.chan);
	q->lock = &ctx->mutex;
	q->min_buffers_needed = 1;
	q->allow_cache_hints = 1;

	ret = vb2_queue_init(q);
	if (ret)
		return ret;

	ctx->vdev.queue = q;

	return 0;
}

/*
 * Query the active format on a remote subdev pad for link validation.
 * The caller pre-fills @fmt; only subdev entities can answer — a video
 * node on the source side indicates a driver bug.
 */
static int ti_csi2rx_link_validate_get_fmt(struct media_pad *pad,
					   struct v4l2_subdev_format *fmt)
{
	struct v4l2_subdev *sd;

	if (!is_media_entity_v4l2_subdev(pad->entity)) {
		WARN(pad->entity->function != MEDIA_ENT_F_IO_V4L,
		     "Driver bug! Wrong media entity type 0x%08x, entity %s\n",
		     pad->entity->function, pad->entity->name);
		return -EINVAL;
	}

	sd = media_entity_to_v4l2_subdev(pad->entity);
	fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
	fmt->pad = pad->index;

	return v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
}

static int ti_csi2rx_link_validate(struct media_link *link)
{
	struct media_entity *entity = link->sink->entity;
	struct video_device *vdev = media_entity_to_video_device(entity);
	struct ti_csi2rx_ctx *ctx = container_of(vdev, struct ti_csi2rx_ctx, vdev);
	struct ti_csi2rx_dev *csi = ctx->csi;
	struct v4l2_pix_format *csi_fmt = &ctx->v_fmt.fmt.pix;
	struct v4l2_subdev_format source_fmt;
	const struct ti_csi2rx_fmt *ti_fmt;
	int ret;

	ret = ti_csi2rx_link_validate_get_fmt(link->source, &source_fmt);
	if (ret)
		return ret;

	if (source_fmt.format.width != csi_fmt->width) {
		dev_err(csi->dev, "Width does not match (source %u, sink %u)\n",
			source_fmt.format.width, csi_fmt->width);
		return -EPIPE;
	}

	if (source_fmt.format.height != csi_fmt->height) {
		dev_err(csi->dev, "Height does not match (source %u, sink %u)\n",
			source_fmt.format.height, csi_fmt->height);
		return -EPIPE;
	}

	if (source_fmt.format.field != csi_fmt->field &&
	    csi_fmt->field != V4L2_FIELD_NONE) {
		dev_err(csi->dev, "Field does not match (source %u, sink %u)\n",
			source_fmt.format.field, csi_fmt->field);
		return -EPIPE;
	}

	ti_fmt = find_format_by_code(source_fmt.format.code);
	if (!ti_fmt) {
		dev_err(csi->dev, "Media bus format 0x%x not supported\n",
			source_fmt.format.code);
		return -EPIPE;
	}

	if (ctx->v_fmt.fmt.pix.pixelformat != ti_fmt->fourcc) {
		dev_err(csi->dev,
			"Cannot transform source fmt 0x%x to sink fmt 0x%x\n",
			ctx->v_fmt.fmt.pix.pixelformat, ti_fmt->fourcc);
		return -EPIPE;
	}

	return 0;
}

/* Media entity ops for the per-context video nodes. */
static const struct media_entity_operations ti_csi2rx_video_entity_ops = {
	.link_validate = ti_csi2rx_link_validate,
};

/*
 * Request and configure the context's DMA channel ("rx<idx>" from the DT
 * dma-names list). The source bus width matches the 16-byte PSI-L word.
 */
static int ti_csi2rx_init_dma(struct ti_csi2rx_ctx *ctx)
{
	struct dma_slave_config cfg = {
		.src_addr_width = DMA_SLAVE_BUSWIDTH_16_BYTES,
	};
	char ch_name[32];
	int ret;

	snprintf(ch_name, sizeof(ch_name), "rx%u", ctx->idx);

	ctx->dma.chan = dma_request_chan(ctx->csi->dev, ch_name);
	if (IS_ERR(ctx->dma.chan))
		return PTR_ERR(ctx->dma.chan);

	ret = dmaengine_slave_config(ctx->dma.chan, &cfg);
	if (ret)
		dma_release_channel(ctx->dma.chan);

	return ret;
}

/*
 * Register the media device, V4L2 device and the streams-capable bridge
 * subdev with one sink pad and num_ctx source pads. On failure, unwind in
 * strict reverse order via the goto chain.
 */
static int ti_csi2rx_v4l2_init(struct ti_csi2rx_dev *csi)
{
	struct media_device *mdev = &csi->mdev;
	struct v4l2_subdev *sd = &csi->subdev;
	int ret, i;

	mdev->dev = csi->dev;
	mdev->hw_revision = 1;
	strscpy(mdev->model, "TI-CSI2RX", sizeof(mdev->model));

	media_device_init(mdev);

	csi->v4l2_dev.mdev = mdev;

	ret = v4l2_device_register(csi->dev, &csi->v4l2_dev);
	if (ret)
		goto cleanup_media;

	ret = media_device_register(mdev);
	if (ret)
		goto unregister_v4l2;

	/* Streams-aware subdev: per-stream routing on the sink pad. */
	v4l2_subdev_init(sd, &ti_csi2rx_subdev_ops);
	sd->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
	sd->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_STREAMS;
	strscpy(sd->name, dev_name(csi->dev), sizeof(sd->name));
	sd->dev = csi->dev;

	csi->pads[TI_CSI2RX_PAD_SINK].flags = MEDIA_PAD_FL_SINK;

	/* One source pad per DMA context. */
	for (i = TI_CSI2RX_PAD_FIRST_SOURCE;
	     i < TI_CSI2RX_PAD_FIRST_SOURCE + csi->num_ctx; i++)
		csi->pads[i].flags = MEDIA_PAD_FL_SOURCE;

	ret = media_entity_pads_init(&sd->entity,
				     TI_CSI2RX_PAD_FIRST_SOURCE + csi->num_ctx,
				     csi->pads);
	if (ret)
		goto unregister_media;

	ret = v4l2_subdev_init_finalize(sd);
	if (ret)
		goto unregister_media;

	ret = v4l2_device_register_subdev(&csi->v4l2_dev, sd);
	if (ret)
		goto cleanup_subdev;

	return 0;

cleanup_subdev:
	v4l2_subdev_cleanup(sd);
unregister_media:
	media_device_unregister(mdev);
unregister_v4l2:
	v4l2_device_unregister(&csi->v4l2_dev);
cleanup_media:
	media_device_cleanup(mdev);

	return ret;
}

/*
 * Initialize one capture context: default pixel format, video node, DMA
 * queues/state, DMA channel and vb2 queue.
 *
 * Fixes:
 *  - the quantization/ycbcr_enc/xfer_func assignments were chained with
 *    comma operators instead of terminated with semicolons — harmless by
 *    accident, but clearly unintended; use proper statements.
 *  - the mutex leaked on every early error return; unwind with gotos.
 */
static int ti_csi2rx_init_ctx(struct ti_csi2rx_ctx *ctx)
{
	struct ti_csi2rx_dev *csi = ctx->csi;
	struct video_device *vdev = &ctx->vdev;
	const struct ti_csi2rx_fmt *fmt;
	struct v4l2_pix_format *pix_fmt = &ctx->v_fmt.fmt.pix;
	int ret;

	mutex_init(&ctx->mutex);

	fmt = find_format_by_fourcc(V4L2_PIX_FMT_UYVY);
	if (!fmt) {
		ret = -EINVAL;
		goto err_mutex;
	}

	/* Default format, matching the subdev routing default. */
	pix_fmt->width = 640;
	pix_fmt->height = 480;
	pix_fmt->field = V4L2_FIELD_NONE;
	pix_fmt->colorspace = V4L2_COLORSPACE_SRGB;
	pix_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
	pix_fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE;
	pix_fmt->xfer_func = V4L2_XFER_FUNC_SRGB;

	ti_csi2rx_fill_fmt(fmt, &ctx->v_fmt);

	ctx->pad.flags = MEDIA_PAD_FL_SINK;
	ret = media_entity_pads_init(&ctx->vdev.entity, 1, &ctx->pad);
	if (ret)
		goto err_mutex;

	snprintf(vdev->name, sizeof(vdev->name), "%s context %u",
		 dev_name(csi->dev), ctx->idx);
	vdev->v4l2_dev = &csi->v4l2_dev;
	vdev->vfl_dir = VFL_DIR_RX;
	vdev->fops = &csi_fops;
	vdev->ioctl_ops = &csi_ioctl_ops;
	vdev->release = video_device_release_empty;
	vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
			    V4L2_CAP_IO_MC;
	vdev->lock = &ctx->mutex;
	video_set_drvdata(vdev, ctx);

	INIT_LIST_HEAD(&ctx->dma.queue);
	INIT_LIST_HEAD(&ctx->dma.submitted);
	spin_lock_init(&ctx->dma.lock);
	ctx->dma.state = TI_CSI2RX_DMA_STOPPED;

	ret = ti_csi2rx_init_dma(ctx);
	if (ret)
		goto err_mutex;

	ret = ti_csi2rx_init_vb2q(ctx);
	if (ret)
		goto err_dma;

	return 0;

err_dma:
	ti_csi2rx_cleanup_dma(ctx);
err_mutex:
	mutex_destroy(&ctx->mutex);
	return ret;
}

#ifdef CONFIG_PM
/*
 * System suspend: if the device is active, assert the pixel reset, then for
 * every context stop the source stream, disable its SHIM DMA context, drain
 * pending DMA into the drain buffer and terminate the channel. DMA state is
 * only sampled under the lock; the stream teardown itself runs unlocked.
 */
static int ti_csi2rx_suspend(struct device *dev)
{
	struct ti_csi2rx_dev *csi = dev_get_drvdata(dev);
	enum ti_csi2rx_dma_state state;
	struct ti_csi2rx_ctx *ctx;
	struct ti_csi2rx_dma *dma;
	unsigned long flags = 0;
	int i, ret = 0;

	/* If device was not in use we can simply suspend */
	if (pm_runtime_status_suspended(dev))
		return 0;

	/*
	 * If device is running, assert the pixel reset to cleanly stop any
	 * on-going streams before we suspend.
	 */
	writel(0, csi->shim + SHIM_CNTL);

	for (i = 0; i < csi->num_ctx; i++) {
		ctx = &csi->ctx[i];
		dma = &ctx->dma;

		/* Snapshot the DMA state under the lock. */
		spin_lock_irqsave(&dma->lock, flags);
		state = dma->state;
		spin_unlock_irqrestore(&dma->lock, flags);

		if (state != TI_CSI2RX_DMA_STOPPED) {
			/* Disable source */
			ret = v4l2_subdev_disable_streams(&csi->subdev,
							  TI_CSI2RX_PAD_FIRST_SOURCE + ctx->idx,
							  BIT(0));
			if (ret)
				dev_err(csi->dev, "Failed to stop subdev stream\n");
		}

		/* Stop any on-going streams */
		writel(0, csi->shim + SHIM_DMACNTX(ctx->idx));

		/* Drain DMA */
		ti_csi2rx_drain_dma(ctx);

		/* Terminate DMA */
		ret = dmaengine_terminate_sync(ctx->dma.chan);
		if (ret)
			dev_err(csi->dev, "Failed to stop DMA\n");
	}

	return ret;
}

/*
 * System resume: de-assert the pixel reset and, for every context that was
 * streaming when we suspended, re-queue its previously submitted buffers,
 * restore the SHIM DMA configuration and re-enable the source stream. The
 * spinlock is held only while walking the submitted list; the subdev and
 * register restore run unlocked.
 */
static int ti_csi2rx_resume(struct device *dev)
{
	struct ti_csi2rx_dev *csi = dev_get_drvdata(dev);
	struct ti_csi2rx_ctx *ctx;
	struct ti_csi2rx_dma *dma;
	struct ti_csi2rx_buffer *buf;
	unsigned long flags = 0;
	unsigned int reg;
	int i, ret = 0;

	/* If device was not in use, we can simply wakeup */
	if (pm_runtime_status_suspended(dev))
		return 0;

	/* If device was in use before, restore all the running streams */
	reg = SHIM_CNTL_PIX_RST;
	writel(reg, csi->shim + SHIM_CNTL);

	for (i = 0; i < csi->num_ctx; i++) {
		ctx = &csi->ctx[i];
		dma = &ctx->dma;
		spin_lock_irqsave(&dma->lock, flags);
		if (dma->state != TI_CSI2RX_DMA_STOPPED) {
			/* Re-submit all previously submitted buffers to DMA */
			list_for_each_entry(buf, &ctx->dma.submitted, list) {
				ti_csi2rx_start_dma(ctx, buf);
			}
			spin_unlock_irqrestore(&dma->lock, flags);

			/* Restore stream config */
			ti_csi2rx_setup_shim(ctx);

			ret = v4l2_subdev_enable_streams(&csi->subdev,
							 TI_CSI2RX_PAD_FIRST_SOURCE + ctx->idx,
							 BIT(0));
			if (ret)
				dev_err(ctx->csi->dev, "Failed to start subdev\n");
		} else {
			spin_unlock_irqrestore(&dma->lock, flags);
		}
	}

	return ret;
}

/*
 * Runtime suspend: refuse while any context is streaming, otherwise
 * release every context's DMA channel (re-acquired on runtime resume).
 */
static int ti_csi2rx_runtime_suspend(struct device *dev)
{
	struct ti_csi2rx_dev *csi = dev_get_drvdata(dev);
	int i;

	/* Streaming contexts keep the device busy. */
	if (csi->enable_count)
		return -EBUSY;

	for (i = 0; i < csi->num_ctx; i++)
		ti_csi2rx_cleanup_dma(&csi->ctx[i]);

	return 0;
}

/*
 * Runtime resume: re-acquire every context's DMA channel released by
 * runtime suspend.
 *
 * Fix: on a mid-loop failure the channels acquired so far were leaked;
 * release them before propagating the error.
 */
static int ti_csi2rx_runtime_resume(struct device *dev)
{
	struct ti_csi2rx_dev *csi = dev_get_drvdata(dev);
	int ret, i;

	for (i = 0; i < csi->num_ctx; i++) {
		ret = ti_csi2rx_init_dma(&csi->ctx[i]);
		if (ret)
			goto err_release;
	}

	return 0;

err_release:
	while (--i >= 0)
		ti_csi2rx_cleanup_dma(&csi->ctx[i]);
	return ret;
}

/* System sleep and runtime PM callbacks. */
static const struct dev_pm_ops ti_csi2rx_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ti_csi2rx_suspend, ti_csi2rx_resume)
	SET_RUNTIME_PM_OPS(ti_csi2rx_runtime_suspend, ti_csi2rx_runtime_resume,
			   NULL)
};
#endif /* CONFIG_PM */

static ssize_t csi_rx_status_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct ti_csi2rx_dev *csi = dev_get_drvdata(dev);
	int tmp_len = 0;
	char tmp[512] = {0};
	char *p_tmp;
	int i;
	s64 us_delta;
	int total_fps = 0;
	int ok_fps = 0;

	p_tmp = tmp;
	for (i = 0; i < csi->num_ctx; i++)
	{
		us_delta = ktime_us_delta(ktime_get(), csi->ctx[i].rx_start);
		total_fps = (csi->ctx[i].ok_frame_cnt + csi->ctx[i].ng_frame_cnt) / (us_delta / 1000000);
		ok_fps = (csi->ctx[i].ok_frame_cnt) / (us_delta / 1000000);
		tmp_len = sprintf(p_tmp, "stream[%d] ok: %d, ng: %d, total_fps: %d, ok_fps: %d\n", 
			i, csi->ctx[i].ok_frame_cnt, csi->ctx[i].ng_frame_cnt, total_fps, ok_fps);
		p_tmp += tmp_len;
	}

	return scnprintf(buf, PAGE_SIZE, "%s\n", tmp);
}

/*
 * sysfs write: "Clear" resets every context's ok/ng frame counters.
 *
 * Fixes:
 *  - sscanf(buf, "%s", cmd) wrote unbounded user input into a 20-byte
 *    stack buffer — a stack overflow on any longer write.
 *  - the 'count == 6' + memcmp(...,5) check was fragile (it required
 *    exactly "Clear\n"); sysfs_streq() is the kernel idiom for matching a
 *    sysfs command with or without a trailing newline.
 */
static ssize_t csi_rx_status_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t count)
{
	struct ti_csi2rx_dev *csi = dev_get_drvdata(dev);
	int i;

	if (!sysfs_streq(buf, "Clear"))
		return -EINVAL;

	for (i = 0; i < csi->num_ctx; i++) {
		csi->ctx[i].ok_frame_cnt = 0;
		csi->ctx[i].ng_frame_cnt = 0;
	}

	return count;
}

/* /sys/devices/.../csi_rx_status: frame statistics (read) and reset (write). */
static DEVICE_ATTR_RW(csi_rx_status);

static struct attribute *ti_csi2rx_dev_attrs[] = {
	&dev_attr_csi_rx_status.attr,
	NULL,
};

static const struct attribute_group ti_csi2rx_dev_attrs_group = {
	.attrs = ti_csi2rx_dev_attrs,
};

/* Expose the statistics attribute group under the platform device. */
static int ti_csi2rx_register_attrib_group(struct device *dev)
{
	return sysfs_create_group(&dev->kobj, &ti_csi2rx_dev_attrs_group);
}

/*
 * Probe: map the SHIM registers, allocate the coherent DMA drain buffer,
 * create one capture context per "dma-names" entry, register the
 * media/V4L2 devices, async notifier and sysfs stats group, then enable
 * runtime PM.
 *
 * Fix: the drain buffer leaked when of_property_count_strings() failed —
 * that path returned directly instead of freeing it; route it through the
 * error labels.
 */
static int ti_csi2rx_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct ti_csi2rx_dev *csi;
	int ret, i, count;

	csi = devm_kzalloc(&pdev->dev, sizeof(*csi), GFP_KERNEL);
	if (!csi)
		return -ENOMEM;

	csi->dev = &pdev->dev;
	platform_set_drvdata(pdev, csi);

	csi->shim = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(csi->shim))
		return PTR_ERR(csi->shim);

	/* Shared buffer used to drain in-flight DMA on stop/suspend. */
	csi->drain.len = DRAIN_BUFFER_SIZE;
	csi->drain.vaddr = dma_alloc_coherent(csi->dev, csi->drain.len,
					      &csi->drain.paddr,
					      GFP_KERNEL);
	if (!csi->drain.vaddr)
		return -ENOMEM;

	/* Only use as many contexts as the number of DMA channels allocated. */
	count = of_property_count_strings(np, "dma-names");
	if (count < 0) {
		dev_err(csi->dev, "Failed to get DMA channel count: %d\n",
			count);
		ret = count;
		goto err_drain;
	}
	csi->enable_multi_stream = false;
	csi->num_ctx = count;
	if (csi->num_ctx > TI_CSI2RX_MAX_CTX) {
		dev_warn(csi->dev,
			 "%u DMA channels passed. Maximum is %u. Ignoring the rest.\n",
			 csi->num_ctx, TI_CSI2RX_MAX_CTX);
		csi->num_ctx = TI_CSI2RX_MAX_CTX;
	}

	mutex_init(&csi->mutex);

	ret = ti_csi2rx_v4l2_init(csi);
	if (ret)
		goto err_v4l2;

	for (i = 0; i < csi->num_ctx; i++) {
		csi->ctx[i].idx = i;
		csi->ctx[i].csi = csi;
		ret = ti_csi2rx_init_ctx(&csi->ctx[i]);
		if (ret)
			goto err_ctx;
	}

	ret = ti_csi2rx_notifier_register(csi);
	if (ret)
		goto err_ctx;

	ret = of_platform_populate(csi->dev->of_node, NULL, NULL, csi->dev);
	if (ret) {
		dev_err(csi->dev, "Failed to create children: %d\n", ret);
		goto err_notifier;
	}

	/* Statistics are best-effort: failure to create them is non-fatal. */
	ret = ti_csi2rx_register_attrib_group(csi->dev);
	if (ret) {
		dev_err(&pdev->dev, "Error creating sysfs attribute group for j721e-csi2rx driver\n");
	}

	pm_runtime_set_active(csi->dev);
	pm_runtime_enable(csi->dev);
	pm_request_idle(csi->dev);

	return 0;

err_notifier:
	ti_csi2rx_cleanup_notifier(csi);
err_ctx:
	i--;
	for (; i >= 0; i--)
		ti_csi2rx_cleanup_ctx(&csi->ctx[i]);
	ti_csi2rx_cleanup_v4l2(csi);
err_v4l2:
	mutex_destroy(&csi->mutex);
err_drain:
	dma_free_coherent(csi->dev, csi->drain.len, csi->drain.vaddr,
			  csi->drain.paddr);
	return ret;
}

static int ti_csi2rx_remove(struct platform_device *pdev)
{
	struct ti_csi2rx_dev *csi = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < csi->num_ctx; i++) {
		if (vb2_is_busy(&csi->ctx[i].vidq))
			dev_err(csi->dev,
				"Failed to remove as queue busy for ctx %u\n",
				i);
	}

	for (i = 0; i < csi->num_ctx; i++)
		ti_csi2rx_cleanup_ctx(&csi->ctx[i]);

	ti_csi2rx_cleanup_notifier(csi);
	ti_csi2rx_cleanup_v4l2(csi);
	mutex_destroy(&csi->mutex);
	dma_free_coherent(csi->dev, csi->drain.len, csi->drain.vaddr,
			  csi->drain.paddr);

	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);

	return 0;
}

static const struct of_device_id ti_csi2rx_of_match[] = {
	{ .compatible = "ti,j721e-csi2rx-shim", },
	{ },
};
MODULE_DEVICE_TABLE(of, ti_csi2rx_of_match);

/* Platform driver registration. */
static struct platform_driver ti_csi2rx_pdrv = {
	.probe = ti_csi2rx_probe,
	.remove = ti_csi2rx_remove,
	.driver = {
		.name		= TI_CSI2RX_MODULE_NAME,
		.of_match_table	= ti_csi2rx_of_match,
#ifdef CONFIG_PM
		.pm		= &ti_csi2rx_pm_ops,
#endif
	},
};

module_platform_driver(ti_csi2rx_pdrv);

MODULE_DESCRIPTION("TI J721E CSI2 RX Driver");
MODULE_AUTHOR("Pratyush Yadav <p.yadav@ti.com>");
MODULE_LICENSE("GPL");

Each time there is a problem with the switching flow, there is an ng error frame

The first problematic switching current:
root@am62pxx-evm:~# cat /sys/devices/platform/bus@f0000/30102000.ticsi2rx/csi_rx_status
stream[0] ok: 62311, ng: 0, total_fps: 537, ok_fps: 537
stream[1] ok: 62310, ng: 1, total_fps: 537, ok_fps: 537
stream[2] ok: 62310, ng: 0, total_fps: 537, ok_fps: 537
stream[3] ok: 62310, ng: 0, total_fps: 537, ok_fps: 537
stream[4] ok: 0, ng: 0, total_fps: 0, ok_fps: 0
stream[5] ok: 0, ng: 0, total_fps: 0, ok_fps: 0

two open:
root@am62pxx-evm:~# cat /sys/devices/platform/bus@f0000/30102000.ticsi2rx/csi_rx_status
stream[0] ok: 672411, ng: 0, total_fps: 622, ok_fps: 622
stream[1] ok: 672410, ng: 2, total_fps: 622, ok_fps: 622
stream[2] ok: 672410, ng: 0, total_fps: 622, ok_fps: 622
stream[3] ok: 672410, ng: 0, total_fps: 622, ok_fps: 622
stream[4] ok: 0, ng: 0, total_fps: 0, ok_fps: 0
stream[5] ok: 0, ng: 0, total_fps: 0, ok_fps: 0

three open:
root@am62pxx-evm:~# cat /sys/devices/platform/bus@f0000/30102000.ticsi2rx/csi_rx_status
stream[0] ok: 673719, ng: 0, total_fps: 620, ok_fps: 620
stream[1] ok: 673716, ng: 3, total_fps: 620, ok_fps: 620
stream[2] ok: 673717, ng: 0, total_fps: 620, ok_fps: 620
stream[3] ok: 673717, ng: 0, total_fps: 620, ok_fps: 620
stream[4] ok: 0, ng: 0, total_fps: 0, ok_fps: 0
stream[5] ok: 0, ng: 0, total_fps: 0, ok_fps: 0

Link:

 media-ctl -p
Media controller API version 6.6.32

Media device information
------------------------
driver          j721e-csi2rx
model           TI-CSI2RX
serial
bus info        platform:30102000.ticsi2rx
hw revision     0x1
driver version  6.6.32

Device topology
- entity 1: 30102000.ticsi2rx (7 pads, 7 links, 4 routes)
            type V4L2 subdev subtype Unknown flags 0
            device node name /dev/v4l-subdev0
        routes:
                0/0 -> 1/0 [ACTIVE]
                0/1 -> 2/0 [ACTIVE]
                0/2 -> 3/0 [ACTIVE]
                0/3 -> 4/0 [ACTIVE]
        pad0: Sink
                [stream:0 fmt:UYVY8_1X16/640x480 field:none colorspace:srgb xfer:srgb ycbcr:601 quantization:lim-range]
                [stream:1 fmt:UYVY8_1X16/640x480 field:none colorspace:srgb xfer:srgb ycbcr:601 quantization:lim-range]
                [stream:2 fmt:UYVY8_1X16/640x480 field:none colorspace:srgb xfer:srgb ycbcr:601 quantization:lim-range]
                [stream:3 fmt:UYVY8_1X16/640x480 field:none colorspace:srgb xfer:srgb ycbcr:601 quantization:lim-range]
                <- "cdns_csi2rx.30101000.csi-bridge":1 [ENABLED,IMMUTABLE]
        pad1: Source
                [stream:0 fmt:UYVY8_1X16/640x480 field:none colorspace:srgb xfer:srgb ycbcr:601 quantization:lim-range]
                -> "30102000.ticsi2rx context 0":0 [ENABLED,IMMUTABLE]
        pad2: Source
                [stream:0 fmt:UYVY8_1X16/640x480 field:none colorspace:srgb xfer:srgb ycbcr:601 quantization:lim-range]
                -> "30102000.ticsi2rx context 1":0 [ENABLED,IMMUTABLE]
        pad3: Source
                [stream:0 fmt:UYVY8_1X16/640x480 field:none colorspace:srgb xfer:srgb ycbcr:601 quantization:lim-range]
                -> "30102000.ticsi2rx context 2":0 [ENABLED,IMMUTABLE]
        pad4: Source
                [stream:0 fmt:UYVY8_1X16/640x480 field:none colorspace:srgb xfer:srgb ycbcr:601 quantization:lim-range]
                -> "30102000.ticsi2rx context 3":0 [ENABLED,IMMUTABLE]
        pad5: Source
                -> "30102000.ticsi2rx context 4":0 [ENABLED,IMMUTABLE]
        pad6: Source
                -> "30102000.ticsi2rx context 5":0 [ENABLED,IMMUTABLE]

- entity 9: cdns_csi2rx.30101000.csi-bridge (5 pads, 2 links, 4 routes)
            type V4L2 subdev subtype Unknown flags 0
            device node name /dev/v4l-subdev1
        routes:
                0/0 -> 1/0 [ACTIVE]
                0/1 -> 1/1 [ACTIVE]
                0/2 -> 1/2 [ACTIVE]
                0/3 -> 1/3 [ACTIVE]
        pad0: Sink
                [stream:0 fmt:UYVY8_1X16/640x480 field:none colorspace:srgb xfer:srgb ycbcr:601 quantization:lim-range]
                [stream:1 fmt:UYVY8_1X16/640x480 field:none colorspace:srgb xfer:srgb ycbcr:601 quantization:lim-range]
                [stream:2 fmt:UYVY8_1X16/640x480 field:none colorspace:srgb xfer:srgb ycbcr:601 quantization:lim-range]
                [stream:3 fmt:UYVY8_1X16/640x480 field:none colorspace:srgb xfer:srgb ycbcr:601 quantization:lim-range]
                <- "m00_dToF_ads6311 spi1.0":0 [ENABLED,IMMUTABLE]
        pad1: Source
                [stream:0 fmt:UYVY8_1X16/640x480 field:none colorspace:srgb xfer:srgb ycbcr:601 quantization:lim-range]
                [stream:1 fmt:UYVY8_1X16/640x480 field:none colorspace:srgb xfer:srgb ycbcr:601 quantization:lim-range]
                [stream:2 fmt:UYVY8_1X16/640x480 field:none colorspace:srgb xfer:srgb ycbcr:601 quantization:lim-range]
                [stream:3 fmt:UYVY8_1X16/640x480 field:none colorspace:srgb xfer:srgb ycbcr:601 quantization:lim-range]
                -> "30102000.ticsi2rx":0 [ENABLED,IMMUTABLE]
        pad2: Source
        pad3: Source
        pad4: Source

- entity 15: m00_dToF_ads6311 spi1.0 (1 pad, 1 link, 4 routes)
             type V4L2 subdev subtype Sensor flags 0
             device node name /dev/v4l-subdev2
        routes:
                0/0 -> 0/0 [ACTIVE]
                0/0 -> 0/1 [ACTIVE]
                0/0 -> 0/2 [ACTIVE]
                0/0 -> 0/3 [ACTIVE]
        pad0: Source
                -> "cdns_csi2rx.30101000.csi-bridge":0 [ENABLED,IMMUTABLE]

- entity 21: 30102000.ticsi2rx context 0 (1 pad, 1 link)
             type Node subtype V4L flags 0
             device node name /dev/video0
        pad0: Sink
                <- "30102000.ticsi2rx":1 [ENABLED,IMMUTABLE]

- entity 27: 30102000.ticsi2rx context 1 (1 pad, 1 link)
             type Node subtype V4L flags 0
             device node name /dev/video1
        pad0: Sink
                <- "30102000.ticsi2rx":2 [ENABLED,IMMUTABLE]

- entity 33: 30102000.ticsi2rx context 2 (1 pad, 1 link)
             type Node subtype V4L flags 0
             device node name /dev/video2
        pad0: Sink
                <- "30102000.ticsi2rx":3 [ENABLED,IMMUTABLE]

- entity 39: 30102000.ticsi2rx context 3 (1 pad, 1 link)
             type Node subtype V4L flags 0
             device node name /dev/video3
        pad0: Sink
                <- "30102000.ticsi2rx":4 [ENABLED,IMMUTABLE]

- entity 45: 30102000.ticsi2rx context 4 (1 pad, 1 link)
             type Node subtype V4L flags 0
             device node name /dev/video4
        pad0: Sink
                <- "30102000.ticsi2rx":5 [ENABLED,IMMUTABLE]

- entity 51: 30102000.ticsi2rx context 5 (1 pad, 1 link)
             type Node subtype V4L flags 0
             device node name /dev/video5
        pad0: Sink
                <- "30102000.ticsi2rx":6 [ENABLED,IMMUTABLE]

dmesg

--on:1, dbg_ctrl:0x0, load_script: 0, ROI_data_src: 2 (from builtin_dot_h_file), lens_type: 0 (WIDEANGLE), workMode:6 (PCM), linkFreq:500000000, hawk_eco_version:ECO2, is_early_chip: 0, prbs_seed:0x34, mcu_chipid:0, mcu_fw_type: 0x0, drv_version: 3.2.0_LM20240904a---
[ 1120.812876] ------TRACE_PM_RUNTIME--Begin of <__sensor_power_on> Line:4063--power_on:0, callline: 3537, sensor->power_on_times: 5---
[ 1120.937606] rpmsg wait for completion time-out for 100 ms.
[ 1120.937628] rpmsg_tx: 00000000: 00 09 00 8a 00 06 8a                             .......
[ 1120.937635] rpmsg Write mcu reg:0x008a failed, ret:-110, rpmsg_from_user: 0
[ 1120.937641] Fail to set rx work mode: 6.
[ 1120.940616] Success for sensor reset cost 1863 us, sensor chip id: 0x3400
[ 1120.940762] ------TRACE_PM_RUNTIME--End of <__sensor_power_on> Line:4228--power_on:1, load_script: 0, ret: 0, sensorId: 0x3400---
[ 1120.941160] client: fdls192_lidar_p (1215:1215) is running on CPU 3, ctrl->id: 0x9f0901, ctrl->val: 1, V4L2_CID_EXPOSURE: 0x980911
[ 1120.941168] sensor_set_ctrl Unhandled id:0x9f0901, val:0x1
[ 1120.941174] client: fdls192_lidar_p (1215:1215) is running on CPU 3, ctrl->id: 0x980911, ctrl->val: 100, V4L2_CID_EXPOSURE: 0x980911
[ 1120.941180] use roi sram from camxhawk-roi-mem.h, workMode: PCM, roi_data: 0000000050c4c83f, roi sram length: 6976
[ 1120.946999] ------TRACE_PM_RUNTIME--Begin of <__sensor_power_on> Line:4063--power_on:1, callline: 3706, sensor->power_on_times: 6---
[ 1120.947009] It is power on already.
[ 1126.178728] --on:0, dbg_ctrl:0x0, load_script: 0, ROI_data_src: 2 (from builtin_dot_h_file), lens_type: 0 (WIDEANGLE), workMode:6 (PCM), linkFreq:500000000, hawk_eco_version:ECO2, is_early_chip: 0, prbs_seed:0x2a, mcu_chipid:0, mcu_fw_type: 0x0, drv_version: 3.2.0_LM20240904a---
[ 1126.178764] ------TRACE_PM_RUNTIME--Begin of <__sensor_power_off> Line:4239--power_on:1, callline: 3830---
[ 1126.182623] ------TRACE_PM_RUNTIME--End of <__sensor_power_off> Line:4280--power_on:0---
[ 1133.097582] client: fdls192_lidar_p (1215:1215) is running on CPU 3, pad: 0, name:m00_dToF_ads6311 spi1.0, fd: 000000004fc3710c
[ 1133.097655] num_routes: 4, source_pad: 0, source_stream: 0
[ 1133.097662] num_entries: 0, w*h: 32 * 192, code: 0x3008, bpp: 16
[ 1133.097668] num_routes: 4, source_pad: 0, source_stream: 1
[ 1133.097673] num_entries: 1, w*h: 480 * 128, code: 0x3008, bpp: 16
[ 1133.097678] num_routes: 4, source_pad: 0, source_stream: 2
[ 1133.097682] num_entries: 2, w*h: 40 * 1, code: 0x3001, bpp: 8
[ 1133.097687] num_routes: 4, source_pad: 0, source_stream: 3
[ 1133.097691] num_entries: 3, w*h: 40 * 1, code: 0x3001, bpp: 8
[ 1133.097850] j721e-csi2rx 30102000.ticsi2rx: enable multi stream: 1 from 4.
[ 1133.097878] client: fdls192_lidar_p (1215:1215) is running on CPU 3
[ 1133.097888] ---- S_OPEN  from fdls192_lidar_p (1215:1215) on CPU 3
[ 1133.097900] client: fdls192_lidar_p (1215:1215) subframe_count: 32, load_script: 0, streaming: 0, pad: 0, stream: 0, code: 0x0, state: 00000000361cf332, which:0x1, width:32, height:192
[ 1133.097921] fmt->which: 1, pad: 0, stream: 0, code: 0x3008, w*h: 32*192.
[ 1133.097931] ---- S_CLOSE  from fdls192_lidar_p (1215:1215) on CPU 3
[ 1133.097951] client: fdls192_lidar_p (1215:1215) is running on CPU 3
[ 1133.097958] ---- S_OPEN  from fdls192_lidar_p (1215:1215) on CPU 3
[ 1133.097966] client: fdls192_lidar_p (1215:1215) subframe_count: 32, load_script: 0, streaming: 0, pad: 0, stream: 0, code: 0x0, state: 00000000361cf332, which:0x1, width:32, height:192
[ 1133.097978] fmt->which: 1, pad: 0, stream: 0, code: 0x3008, w*h: 32*192.
[ 1133.097987] ---- S_CLOSE  from fdls192_lidar_p (1215:1215) on CPU 3
[ 1133.112073] client: fdls192_lidar_p (1215:1215) is running on CPU 3, pad: 0, name:m00_dToF_ads6311 spi1.0, fd: 00000000f9118d29
[ 1133.112098] num_routes: 4, source_pad: 0, source_stream: 0
[ 1133.112103] num_entries: 0, w*h: 32 * 192, code: 0x3008, bpp: 16
[ 1133.112109] num_routes: 4, source_pad: 0, source_stream: 1
[ 1133.112113] num_entries: 1, w*h: 480 * 128, code: 0x3008, bpp: 16
[ 1133.112118] num_routes: 4, source_pad: 0, source_stream: 2
[ 1133.112123] num_entries: 2, w*h: 40 * 1, code: 0x3001, bpp: 8
[ 1133.112128] num_routes: 4, source_pad: 0, source_stream: 3
[ 1133.112132] num_entries: 3, w*h: 40 * 1, code: 0x3001, bpp: 8
[ 1133.112562] client: fdls192_lidar_p (1215:1215) is running on CPU 3, pad: 0, name:m00_dToF_ads6311 spi1.0, fd: 00000000f9118d29
[ 1133.112574] num_routes: 4, source_pad: 0, source_stream: 0
[ 1133.112579] num_entries: 0, w*h: 32 * 192, code: 0x3008, bpp: 16
[ 1133.112584] num_routes: 4, source_pad: 0, source_stream: 1
[ 1133.112588] num_entries: 1, w*h: 480 * 128, code: 0x3008, bpp: 16
[ 1133.112593] num_routes: 4, source_pad: 0, source_stream: 2
[ 1133.112597] num_entries: 2, w*h: 40 * 1, code: 0x3001, bpp: 8
[ 1133.112602] num_routes: 4, source_pad: 0, source_stream: 3
[ 1133.112606] num_entries: 3, w*h: 40 * 1, code: 0x3001, bpp: 8
[ 1133.112639] client: fdls192_lidar_p (1215:1215) is running on CPU 3, pad: 0, name:m00_dToF_ads6311 spi1.0, fd: 00000000f9118d29
[ 1133.112647] num_routes: 4, source_pad: 0, source_stream: 0
[ 1133.112651] num_entries: 0, w*h: 32 * 192, code: 0x3008, bpp: 16
[ 1133.112656] num_routes: 4, source_pad: 0, source_stream: 1
[ 1133.112659] num_entries: 1, w*h: 480 * 128, code: 0x3008, bpp: 16
[ 1133.112665] num_routes: 4, source_pad: 0, source_stream: 2
[ 1133.112668] num_entries: 2, w*h: 40 * 1, code: 0x3001, bpp: 8
[ 1133.112673] num_routes: 4, source_pad: 0, source_stream: 3
[ 1133.112677] num_entries: 3, w*h: 40 * 1, code: 0x3001, bpp: 8
[ 1133.112708] client: fdls192_lidar_p (1215:1215) is running on CPU 3, pad: 0, name:m00_dToF_ads6311 spi1.0, fd: 00000000f9118d29
[ 1133.112716] num_routes: 4, source_pad: 0, source_stream: 0
[ 1133.112720] num_entries: 0, w*h: 32 * 192, code: 0x3008, bpp: 16
[ 1133.112725] num_routes: 4, source_pad: 0, source_stream: 1
[ 1133.112729] num_entries: 1, w*h: 480 * 128, code: 0x3008, bpp: 16
[ 1133.112734] num_routes: 4, source_pad: 0, source_stream: 2
[ 1133.112738] num_entries: 2, w*h: 40 * 1, code: 0x3001, bpp: 8
[ 1133.112742] num_routes: 4, source_pad: 0, source_stream: 3
[ 1133.112746] num_entries: 3, w*h: 40 * 1, code: 0x3001, bpp: 8
[ 1133.112769] Function csi2rx_enable_streams  at line 494
[ 1133.112777] --on:1, dbg_ctrl:0x0, load_script: 0, ROI_data_src: 2 (from builtin_dot_h_file), lens_type: 0 (WIDEANGLE), workMode:6 (PCM), linkFreq:500000000, hawk_eco_version:ECO2, is_early_chip: 0, prbs_seed:0x2a, mcu_chipid:0, mcu_fw_type: 0x0, drv_version: 3.2.0_LM20240904a---
[ 1133.112790] ------TRACE_PM_RUNTIME--Begin of <__sensor_power_on> Line:4063--power_on:0, callline: 3537, sensor->power_on_times: 6---
[ 1133.237607] rpmsg wait for completion time-out for 100 ms.
[ 1133.237629] rpmsg_tx: 00000000: 00 0a 00 8a 00 06 89                             .......
[ 1133.237636] rpmsg Write mcu reg:0x008a failed, ret:-110, rpmsg_from_user: 0
[ 1133.237642] Fail to set rx work mode: 6.
[ 1133.240531] Success for sensor reset cost 1776 us, sensor chip id: 0x3400
[ 1133.240677] ------TRACE_PM_RUNTIME--End of <__sensor_power_on> Line:4228--power_on:1, load_script: 0, ret: 0, sensorId: 0x3400---
[ 1133.241075] client: fdls192_lidar_p (1215:1215) is running on CPU 3, ctrl->id: 0x9f0901, ctrl->val: 1, V4L2_CID_EXPOSURE: 0x980911
[ 1133.241083] sensor_set_ctrl Unhandled id:0x9f0901, val:0x1
[ 1133.241090] client: fdls192_lidar_p (1215:1215) is running on CPU 3, ctrl->id: 0x980911, ctrl->val: 100, V4L2_CID_EXPOSURE: 0x980911
[ 1133.241096] use roi sram from camxhawk-roi-mem.h, workMode: PCM, roi_data: 0000000050c4c83f, roi sram length: 6976
[ 1133.246917] ------TRACE_PM_RUNTIME--Begin of <__sensor_power_on> Line:4063--power_on:1, callline: 3706, sensor->power_on_times: 7---
[ 1133.246929] It is power on already.
[ 1713.600714] client: media-ctl (4363:4363) is running on CPU 0
[ 1713.600753] ---- S_OPEN  from media-ctl (4363:4363) on CPU 0
[ 1713.601176] ---- S_CLOSE  from media-ctl (4363:4363) on CPU 0
[ 1738.238009] client: media-ctl (4408:4408) is running on CPU 1
[ 1738.238056] ---- S_OPEN  from media-ctl (4408:4408) on CPU 1

Thanks!

  • dma Error code 21

    j721e-csi2rx 30102000.ticsi2rx: Failed DMA transfer for frame#1 of (stream: 1 idx: 1, vc: 1),timestamp: 142340060875 result=21, residue=0
    

    Meanwhile, another error situation was also discovered: no error code was detected but the frame rate did not match

    root@am62pxx-evm:~# cat /sys/devices/platform/bus@f0000/30102000.ticsi2rx/csi_rx_status
    stream[0] ok: 15727, ng: 0, total_fps: 374, ok_fps: 374
    stream[1] ok: 15728, ng: 0, total_fps: 374, ok_fps: 374
    stream[2] ok: 15727, ng: 0, total_fps: 374, ok_fps: 374
    stream[3] ok: 15727, ng: 0, total_fps: 374, ok_fps: 374
    stream[4] ok: 0, ng: 0, total_fps: 0, ok_fps: 0
    stream[5] ok: 0, ng: 0, total_fps: 0, ok_fps: 0
    

  • Hello Xiangxu,

    Are you trying to modify the CSI Rx driver to receive the 4 streams? I would recommend to use the 4 video device nodes to receive data from the 4 streams in user space.

    Regards,

    Jianzhong

  • Yes — in order to keep the four links on the same frame, a check was added when a stream is opened so that capture is only started once the last video node begins streaming. So far there are only four video streams; you can check the link above.

    /*
     * Enable the requested source streams on this CSI2RX bridge.
     *
     * Translates the source-pad @streams_mask back into the sink-pad stream
     * mask, powers up / configures the controller for the first user, and
     * (conditionally, see below) forwards the enable to the remote source
     * subdev.  Returns 0 on success or a negative errno.
     *
     * NOTE(review): this is a customer modification — the source subdev is
     * only started once the translated mask is exactly BIT(3) or 0xf, i.e.
     * when the last of the four video nodes starts streaming, so that all
     * four VCs begin on the same frame.  Any other enable order never
     * starts the source; confirm user space always opens all four nodes.
     */
    static int csi2rx_enable_streams(struct v4l2_subdev *subdev,
    				 struct v4l2_subdev_state *state, u32 pad,
    				 u64 streams_mask)
    {
    	struct csi2rx_priv *csi2rx = v4l2_subdev_to_csi2rx(subdev);
    	struct media_pad *remote_pad;
    	u64 sink_streams;
    	int ret;
    
    	remote_pad = media_pad_remote_pad_first(&csi2rx->pads[CSI2RX_PAD_SINK]);
    	if (!remote_pad) {
    		dev_err(csi2rx->dev,
    			"Failed to find connected source\n");
    		return -ENODEV;
    	}
    
    	/* Balanced by pm_runtime_put() in the error path / disable path. */
    	ret = pm_runtime_resume_and_get(csi2rx->dev);
    	if (ret < 0)
    		return ret;
    
    	/* Map the requested source-pad streams onto the sink pad. */
    	sink_streams = v4l2_subdev_state_xlate_streams(state,
    						       CSI2RX_PAD_SOURCE_STREAM0,
    						       CSI2RX_PAD_SINK,
    						       &streams_mask);
    
    	mutex_lock(&csi2rx->lock);
    	/*
    	 * If we're not the first users, there's no need to
    	 * enable the whole controller.
    	 */
    	if (!csi2rx->count) {
    		ret = csi2rx_start(csi2rx);
    		if (ret)
    			goto err_stream_start;
    	}
    
    	/* Start streaming on the source */
    	if ((sink_streams == BIT(3)) || (sink_streams == 0xf)) {
    		printk( "Function %s  at line %d\n", __func__, __LINE__);
    		ret = v4l2_subdev_enable_streams(csi2rx->source_subdev, remote_pad->index,
    						 sink_streams);
    		if (ret) {
    			dev_err(csi2rx->dev,
    				"Failed to start streams %#llx on subdev\n",
    				sink_streams);
    			goto err_subdev_enable;
    		}
    	}
    	csi2rx->count++;
    	mutex_unlock(&csi2rx->lock);
    
    	return 0;
    
    err_subdev_enable:
    	/* count is still 0 here if we were the first user, so undo start. */
    	if (!csi2rx->count)
    		csi2rx_stop(csi2rx);
    err_stream_start:
    	mutex_unlock(&csi2rx->lock);
    	pm_runtime_put(csi2rx->dev);
    	return ret;
    }
    
    /*
     * Disable the requested source streams on this CSI2RX bridge.
     *
     * Mirror of csi2rx_enable_streams(): conditionally stops the source
     * subdev (customer gating, see below), then drops the user count and
     * stops the controller when the last user leaves.
     *
     * NOTE(review): the source subdev is only stopped when the translated
     * mask is exactly BIT(3) or 0xf, matching the enable-side hack; with a
     * different teardown order the sensor keeps streaming while
     * csi2rx_stop() runs, which can leave stale data in the PSI-L
     * endpoint for the next start — worth confirming against the
     * stream-off/stream-on frame-loss symptom.
     * NOTE(review): unlike enable, the subdev call here happens before
     * taking csi2rx->lock, and count-- is not guarded against underflow if
     * disable is ever called more times than enable — confirm callers are
     * balanced.
     */
    static int csi2rx_disable_streams(struct v4l2_subdev *subdev,
    				  struct v4l2_subdev_state *state, u32 pad,
    				  u64 streams_mask)
    {
    	struct csi2rx_priv *csi2rx = v4l2_subdev_to_csi2rx(subdev);
    	struct media_pad *remote_pad;
    	u64 sink_streams;
    	int ret;
    
    //	csi2rx_print_status(csi2rx);
    	/* Map the source-pad streams being disabled onto the sink pad. */
    	sink_streams = v4l2_subdev_state_xlate_streams(state,
    						       CSI2RX_PAD_SOURCE_STREAM0,
    						       CSI2RX_PAD_SINK,
    						       &streams_mask);
    
    	remote_pad = media_pad_remote_pad_first(&csi2rx->pads[CSI2RX_PAD_SINK]);
    	if (!remote_pad) {
    		dev_err(csi2rx->dev,
    			"Failed to find connected source\n");
    		return -ENODEV;
    	}
    
    	/* stop streaming on the source */
    	if ((sink_streams == BIT(3)) || (sink_streams == 0xf)) {
    		ret = v4l2_subdev_disable_streams(csi2rx->source_subdev, remote_pad->index,
    						 sink_streams);
    		if (ret) {
    			dev_err(csi2rx->dev,
    				"Failed %d to stop streams %#llx on subdev\n", ret, sink_streams);
    		}
    	}
    
    	mutex_lock(&csi2rx->lock);
    	csi2rx->count--;
    	/*
    	 * Let the last user turn off the lights.
    	 */
    	if (!csi2rx->count)
    		csi2rx_stop(csi2rx);
    	mutex_unlock(&csi2rx->lock);
    
    	/* Balances the pm_runtime_resume_and_get() done at enable time. */
    	pm_runtime_put(csi2rx->dev);
    
    	return 0;
    }

  • hi:

    We determine whether buffers belong to the same frame by comparing their capture timestamps and frame counts. A timestamp difference exceeding 1000 (µs, per FrameTimeUsec) indicates they are not from the same frame. The following are four experiments:

    				VC0        VC1       VC2       VC3               sequence
    TIME1:				
    FrameTimeUsec =90521918   90520355  90521918  90521918      sequence 1 1 1 1
    FrameTimeUsec =90523472   90521940  90523472  90523473      sequence 2 2 2 2
    FrameTimeUsec =90525036   90523472  90525031  90525030      sequence 3 3 3 3
    FrameTimeUsec =90526589   90525030  90526589  90526589      sequence 4 4 4 4
    FrameTimeUsec =90528147   90526588  90528152  90528146      sequence 5 5 5 5
    FrameTimeUsec =90529703   90528146  90529708  90529703      sequence 6 6 6 6
    FrameTimeUsec =90531260   90529702  90531260  90531261      sequence 7 7 7 7
    FrameTimeUsec =90532816   90531260  90532816  90532817      sequence 8 8 8 8
    FrameTimeUsec =90534375   90532816  90534379  90534375      sequence 9 9 9 9
    FrameTimeUsec =90535935   90534374  90535935  90535935      sequence 10 10 10 10
    
    
    
    TIME2:
    FrameTimeUsec =363885079   364024329  364024332  364024329  sequence 0 0 0 0
    FrameTimeUsec =364024329   364025881  364025881  364025876  sequence 1 1 1 1
    FrameTimeUsec =364025882   364027436  364027436  364027436  sequence 2 2 2 2
    FrameTimeUsec =364027436   364029000  364029000  364029000  sequence 3 3 3 3
    FrameTimeUsec =364029000   364030557  364030557  364030557  sequence 4 4 4 4
    FrameTimeUsec =364030557   364032108  364032114  364032108  sequence 5 5 5 5
    FrameTimeUsec =364032108   364033668  364033667  364033667  sequence 6 6 6 6
    FrameTimeUsec =364033668   364035226  364035229  364035226  sequence 7 7 7 7
    FrameTimeUsec =364035226   364036783  364036786  364036782  sequence 8 8 8 8
    FrameTimeUsec =364036783   364038343  364038346  364038342  sequence 9 9 9 9
    FrameTimeUsec =364038342   364039902  364039904  364039902  sequence 10 10 10 10
    
    
    TIME3:
    FrameTimeUsec =119025879   119163875  119164304  119164304  sequence 0 0 0 0
    FrameTimeUsec =119025894   119164301  119165860  119165860  sequence 1 1 1 1
    FrameTimeUsec =119164302   119165859  119167418  119167415  sequence 2 2 2 2
    FrameTimeUsec =119165860   119167414  119168970  119168971  sequence 3 3 3 3
    FrameTimeUsec =119167414   119168970  119170531  119170531  sequence 4 4 4 4
    FrameTimeUsec =119168970   119170531  119172086  119172086  sequence 5 5 5 5
    FrameTimeUsec =119170531   119172084  119173643  119173643  sequence 6 6 6 6
    FrameTimeUsec =119172085   119173643  119175200  119175200  sequence 7 7 7 7
    FrameTimeUsec =119173643   119175200  119176753  119176754  sequence 8 8 8 8
    FrameTimeUsec =119175200   119176753  119178317  119178314  sequence 9 9 9 9
    FrameTimeUsec =119176753   119178313  119179869  119179870  sequence 10 10 10 10
    
    
    TIME4:
    FrameTimeUsec =1057356156   1057495980  1057496410  1057496410  sequence 0 0 0 0
    FrameTimeUsec =1057356173   1057496407  1057498054  1057498054  sequence 1 1 1 1
    FrameTimeUsec =1057496408   1057498053  1057499698  1057499697  sequence 2 2 2 2
    FrameTimeUsec =1057498054   1057499697  1057501342  1057501341  sequence 3 3 3 3
    FrameTimeUsec =1057499697   1057501341  1057502986  1057502986  sequence 4 4 4 4
    FrameTimeUsec =1057501341   1057502986  1057504629  1057504628  sequence 5 5 5 5
    FrameTimeUsec =1057502986   1057504628  1057506271  1057506270  sequence 6 6 6 6
    FrameTimeUsec =1057504628   1057506271  1057507920  1057507918  sequence 7 7 7 7
    FrameTimeUsec =1057506271   1057507918  1057509561  1057509561  sequence 8 8 8 8
    FrameTimeUsec =1057507918   1057509561  1057511207  1057511207  sequence 9 9 9 9
    FrameTimeUsec =1057509561   1057511207  1057512855  1057512852  sequence 10 10 10 10

  • hi:

    video release code:

    /*
     * Stop the raw-capture path: signal the capture thread to exit, wait
     * for it, then stream off, release buffers and close all device fds.
     *
     * Teardown order matters: the capture thread must be fully stopped
     * before stop_capturing()/uninit_device() tear the queues down
     * underneath it.
     *
     * NOTE(review): the wait loop below has no timeout — if the capture
     * thread never clears capture_loop_state this spins forever; consider
     * bounding it.
     */
    XCamReturn v4l2_hal_raw_uapi_stop_capture()
    {
        LOG_DEBUG("enter\n");
    
        //stop capture thread
        g_scapinfo.enable_capture = false;
    
        //wait capture thread exit
        while (g_scapinfo.capture_loop_state) {
            usleep(1000 * 2); //sleep 2ms
        }
    
        /* Thread is gone; its sync primitives and counters can be reset. */
        sem_destroy(&g_semSync);
        g_sfCaptureFrameCostTime = 0;
        g_sullCaptureFrameCount = 0;
        LOG_INFO("capture thread had exited\n");
    
        if (g_processFrameCB) {
            g_processFrameCB = NULL;
        }
    
        stop_capturing(&g_scapinfo);
        uninit_device(&g_scapinfo);
        RawCaptureDeinit();
    
        LOG_DEBUG("exit\n");
        return XCAM_RETURN_NO_ERROR;
    }
    
    void stop_capturing(struct capture_info* cap_info)
    {
        enum v4l2_buf_type type;
    
        for (int index = 0; index < CIF_DEVICE_NUMBER; index++) {
            switch (cap_info->io) {
                case V4L2_MEMORY_MMAP:
                case V4L2_MEMORY_USERPTR:
                case V4L2_MEMORY_DMABUF:
                    if (cap_info->video_info[index].dev_fd > 0) {
                        type = cap_info->video_info[index].capture_buf_type;
                        device_streamoff(cap_info->video_info[index].dev_fd, &type);
                    }
                    break;
            }
        }
    
        LOG_DEBUG("#### stop_capturing #### \n");
    }
    /*
     * Thin wrapper around VIDIOC_STREAMOFF: logs on failure and returns
     * xioctl()'s result unchanged (-1 on error).
     */
    int device_streamoff(int dev_fd, enum v4l2_buf_type* type)
    {
        int rc;
    
        rc = xioctl(dev_fd, VIDIOC_STREAMOFF, type);
        if (rc == -1)
            errno_debug("VIDIOC_STREAMOFF");
    
        return rc;
    }
    
    /*
     * Tear down every capture device: release driver-side buffers via
     * VIDIOC_REQBUFS, free the user-space copies and bookkeeping arrays,
     * and close each device fd.
     *
     * Fixes vs. original:
     *  - 'ret' was declared unsigned int, so the 'ret < 0' check after
     *    ioctl() could never be true (a negative return converts to a huge
     *    unsigned value) and REQBUFS failures were silently dropped; it is
     *    now a signed int.
     *  - the unused loop variable 'i' is removed.
     */
    void uninit_device(struct capture_info* cap_info)
    {
        int ret;
        struct v4l2_requestbuffers rb;
    
        for (int index = 0; index < CIF_DEVICE_NUMBER; index++) {
            uninit_io_method(cap_info->io, &cap_info->video_info[index]);
    
            memset(&rb, 0, sizeof rb);
            /* NOTE(review): buffers are normally released with count = 0;
             * count = 4 and the hard-coded USERPTR memory type are kept
             * from the original — confirm they are intentional. */
            rb.count = 4;
            rb.type = cap_info->video_info[index].capture_buf_type;
            rb.memory = V4L2_MEMORY_USERPTR;
    
            ret = ioctl(cap_info->video_info[index].dev_fd, VIDIOC_REQBUFS, &rb);
            if (ret < 0) {
                printf("Unable to release buffers: %s (%d).\n",
                    strerror(errno), errno);
            }
    
            /* Free the optional per-channel copy buffer. */
            if ((g_strCapImageList.strImageList[index].pAddr) && (cap_info->enableCopy)) {
                free(g_strCapImageList.strImageList[index].pAddr);
                g_strCapImageList.strImageList[index].pAddr  = NULL;
                g_strCapImageList.strImageList[index].length = 0;
                g_strCapImageList.imageCount                 = 0;
            }
    
            /* v4l2_buf[0] and v4l2_buf[1] apparently point into a single
             * allocation, so only the lower (base) pointer is freed.
             * NOTE(review): confirm both really share one allocation. */
            if (cap_info->video_info[index].v4l2_buf[0]) {
                if (cap_info->video_info[index].v4l2_buf[0] < cap_info->video_info[index].v4l2_buf[1])
                    free(cap_info->video_info[index].v4l2_buf[0]);
                else
                    free(cap_info->video_info[index].v4l2_buf[1]);
                cap_info->video_info[index].v4l2_buf[0] = NULL;
                cap_info->video_info[index].v4l2_buf[1] = NULL;
            }
    
            if (cap_info->video_info[index].buffers) {
                free(cap_info->video_info[index].buffers);
                cap_info->video_info[index].buffers = NULL;
            }
    
            if (cap_info->video_info[index].dev_fd > 0) {
                device_close(cap_info->video_info[index].dev_fd);
                cap_info->video_info[index].dev_fd = -1;
            }
        }
    }
    
    /*
     * Dispatch buffer teardown to the helper matching the active I/O
     * method.  Unknown methods are ignored.  Always returns 0.
     */
    int uninit_io_method(enum v4l2_memory io, struct video_device_info* video_info)
    {
        if (io == V4L2_MEMORY_MMAP)
            uninit_mmap(video_info);
        else if (io == V4L2_MEMORY_USERPTR)
            uninit_userp(video_info);
        else if (io == V4L2_MEMORY_DMABUF)
            uninit_dmabuf(video_info);
    
        return 0;
    }
    /* Free every malloc'ed USERPTR buffer and clear the stale pointers. */
    void uninit_userp(struct video_device_info* video_info)
    {
        uint8_t idx;
    
        for (idx = 0; idx < video_info->n_buffers; idx++) {
            void *start = video_info->buffers[idx].start;
    
            if (start) {
                free(start);
                video_info->buffers[idx].start = NULL;
            }
        }
    }
    
    /*
     * Close a device fd, logging any error via errno_debug().
     *
     * Fix vs. original: the trailing 'dev_fd = -1;' only wrote to the
     * by-value parameter and had no effect on the caller's variable; it is
     * removed.  Callers already reset their own copy after calling us.
     */
    void device_close(int dev_fd)
    {
        if (-1 == close(dev_fd)) {
            errno_debug("close");
        }
    }
    
    /* Close the subdev fd and every video-node fd held in g_scapinfo. */
    static void RawCaptureDeinit()
    {
        int index;
    
        if (g_scapinfo.subdev_fd > 0) {
            device_close(g_scapinfo.subdev_fd);
            g_scapinfo.subdev_fd = -1;
            LOG_DEBUG("device_close(g_scapinfo.subdev_fd)\n");
        }
    
        for (index = 0; index < CIF_DEVICE_NUMBER; index++) {
            if (g_scapinfo.video_info[index].dev_fd <= 0)
                continue;
    
            device_close(g_scapinfo.video_info[index].dev_fd);
            g_scapinfo.video_info[index].dev_fd = -1;
            LOG_DEBUG("device_close(%s)\n", g_scapinfo.video_info[index].dev_name);
        }
    }
    
    
    

  • Hi Jianzhong,

    This issue is reproduced in test-case "streaming off" + "start streaming on"

    you can see that the timestamps of the 4 VCs are not within a reasonable range of each other.

    eg:

                                        VC0      VC1          VC2          VC3         sequence
    TIME1:
    FrameTimeUsec =90521918 90520355 90521918 90521918    sequence 1 1 1 1 

    the timestamp of VC1 is earlier than VC0/VC2/VC3

    this timestamp is capture through function "ti_csi2rx_dma_callback"

    buf->vb.vb2_buf.timestamp = ktime_get_ns();

    Please help check whether anything in the driver could be related to this issue.

    I also asked customer to reproduce this issue in case "power on am62p" + "streaming on" + "power off am62p" to check if this issue is still there.

    Regards

    Joe

  • Hi Jianzhong,

    Customer tested the use-case  "power on am62p" + "streaming on" + "power off am62p", it can not reproduce this issue in one hour.

    please help to check the software process video stream off and stream on. thanks.

    Regards

    Joe 

  • Hi Joe, Xiangxu,

    Based on the description, I think this may be a DMA drain issue. We improved the DMA drain mechanism in SDK 11.0 release. Can you try the latest SDK (11.0): https://www.ti.com/tool/PROCESSOR-SDK-AM62P? 

    To see the updates in 11.0, you can do a diff on j721e-csi2rx.c between the SDK version you have and SDK 11.0: https://git.ti.com/cgit/ti-linux-kernel/ti-linux-kernel/tree/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c?h=ti-linux-6.12.y.

    Regards,

    Jianzhong

  • // SPDX-License-Identifier: GPL-2.0-only
    /*
     * TI CSI2RX Shim Wrapper Driver
     *
     * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com/
     *
     * Author: Pratyush Yadav <p.yadav@ti.com>
     */
    
    #include <linux/bitfield.h>
    #include <linux/dmaengine.h>
    #include <linux/module.h>
    #include <linux/of_platform.h>
    #include <linux/platform_device.h>
    #include <linux/pm_runtime.h>
    #include <linux/adaps_dtof_uapi.h>
    
    #include <media/mipi-csi2.h>
    #include <media/v4l2-device.h>
    #include <media/v4l2-ioctl.h>
    #include <media/v4l2-mc.h>
    //#include <media/videobuf2-dma-contig.h>
    #include <media/videobuf2-dma-sg.h>
    #define TI_CSI2RX_MODULE_NAME		"j721e-csi2rx"
    
    #define SHIM_CNTL			0x10
    #define SHIM_CNTL_PIX_RST		BIT(0)
    
    #define SHIM_DMACNTX(i)			(0x20 + ((i) * 0x20))
    #define SHIM_DMACNTX_EN			BIT(31)
    #define SHIM_DMACNTX_YUV422		GENMASK(27, 26)
    #define SHIM_DMACNTX_DUAL_PCK_CFG	BIT(24)
    #define SHIM_DMACNTX_SIZE		GENMASK(21, 20)
    #define SHIM_DMACNTX_VC			GENMASK(9, 6)
    #define SHIM_DMACNTX_FMT		GENMASK(5, 0)
    #define SHIM_DMACNTX_YUV422_MODE_11	3
    #define SHIM_DMACNTX_SIZE_8		0
    #define SHIM_DMACNTX_SIZE_16		1
    #define SHIM_DMACNTX_SIZE_32		2
    
    #define SHIM_PSI_CFG0(i)		(0x24 + ((i) * 0x20))
    #define SHIM_PSI_CFG0_SRC_TAG		GENMASK(15, 0)
    #define SHIM_PSI_CFG0_DST_TAG		GENMASK(31, 16)
    
    #define TI_CSI2RX_MAX_PIX_PER_CLK	4
    #define PSIL_WORD_SIZE_BYTES		16
    #define TI_CSI2RX_MAX_CTX		32
    
    /*
     * There are no hard limits on the width or height. The DMA engine can handle
     * all sizes. The max width and height are arbitrary numbers for this driver.
     * Use 16K * 16K as the arbitrary limit. It is large enough that it is unlikely
     * the limit will be hit in practice.
     */
    #define MAX_WIDTH_BYTES			SZ_16K
    #define MAX_HEIGHT_LINES		SZ_16K
    
    #define TI_CSI2RX_PAD_SINK		0
    #define TI_CSI2RX_PAD_FIRST_SOURCE	1
    #define TI_CSI2RX_MAX_SOURCE_PADS	TI_CSI2RX_MAX_CTX
    #define TI_CSI2RX_MAX_PADS		(1 + TI_CSI2RX_MAX_SOURCE_PADS)
    
    #define DRAIN_BUFFER_SIZE		SZ_32K
    #define DRAIN_TIMEOUT_MS		250
    
    /* One supported capture format: the V4L2 fourcc plus its matching
     * media-bus code, CSI-2 data type and SHIM unpacking parameters. */
    struct ti_csi2rx_fmt {
    	u32				fourcc;	/* Four character code. */
    	u32				code;	/* Mbus code. */
    	u32				csi_dt;	/* CSI Data type. */
    	u8				bpp;	/* Bits per pixel. */
    	u8				size;	/* Data size shift when unpacking. */
    };
    
    /* Per-frame buffer wrapper used to link a vb2 buffer into the driver's
     * DMA bookkeeping lists. */
    struct ti_csi2rx_buffer {
    	/* Common v4l2 buffer. Must be first. */
    	struct vb2_v4l2_buffer		vb;
    	struct list_head		list;	/* Linkage for the ti_csi2rx_dma lists. */
    	struct ti_csi2rx_ctx		*ctx;	/* Capture context owning this buffer. */
    };
    
    /* Coarse DMA lifecycle state for one capture context. */
    enum ti_csi2rx_dma_state {
    	TI_CSI2RX_DMA_STOPPED,	/* Streaming not started yet. */
    	TI_CSI2RX_DMA_ACTIVE,	/* Streaming and pending DMA operation. */
    };
    
    /* DMA channel state for one capture context. */
    struct ti_csi2rx_dma {
    	/* Protects all fields in this struct. */
    	spinlock_t			lock;
    	struct dma_chan			*chan;	/* dmaengine channel for this context. */
    	/* Buffers queued to the driver, waiting to be processed by DMA. */
    	struct list_head		queue;
    	enum ti_csi2rx_dma_state	state;
    	/*
    	 * Queue of buffers submitted to DMA engine.
    	 */
    	struct list_head		submitted;
    };
    
    struct ti_csi2rx_dev;
    
    /* Per-video-node capture context (one per SHIM DMA context). */
    struct ti_csi2rx_ctx {
    	struct ti_csi2rx_dev		*csi;	/* Parent device. */
    	struct video_device		vdev;	/* /dev/videoN node for this stream. */
    	struct vb2_queue		vidq;	/* videobuf2 queue. */
    	struct mutex			mutex; /* To serialize ioctls. */
    	struct v4l2_format		v_fmt;	/* Active capture format. */
    	struct ti_csi2rx_dma		dma;	/* DMA channel state. */
    	struct media_pad		pad;	/* Sink pad of the video node. */
    	u32				sequence;	/* Frame sequence counter. */
    	u32				idx;	/* Context index (DMA context number). */
    	u32				vc;	/* CSI-2 virtual channel. */
    	u32				stream;	/* Source stream id. */
    	u32				ok_frame_cnt;	/* Completed frames (custom stats). */
    	u32				ng_frame_cnt;	/* Failed frames (custom stats). */
    	/* NOTE(review): presumably the streaming start time used for the
    	 * fps figures in the csi_rx_status sysfs output — confirm. */
    	ktime_t			rx_start;
    };
    
    /* Top-level device state for one TI CSI2RX SHIM instance. */
    struct ti_csi2rx_dev {
    	struct device			*dev;
    	void __iomem			*shim;	/* SHIM register base. */
    	/* To serialize core subdev ioctls. */
    	struct mutex			mutex;
    	unsigned int			enable_count;	/* Active stream-enable count. */
    	bool					enable_multi_stream;
    	unsigned int			num_ctx;	/* Number of populated ctx[] entries. */
    	struct v4l2_async_notifier	notifier;
    	struct media_device		mdev;
    	struct media_pipeline		pipe;
    	struct media_pad		pads[TI_CSI2RX_MAX_PADS];
    	struct v4l2_device		v4l2_dev;
    	struct v4l2_subdev		*source;	/* Upstream (cdns) bridge subdev. */
    	struct v4l2_subdev		subdev;	/* Our own multiplexed subdev. */
    	struct ti_csi2rx_ctx		ctx[TI_CSI2RX_MAX_CTX];
    	struct notifier_block		pm_notifier;
    	u64				enabled_streams_mask;
    	u8				pix_per_clk;	/* Negotiated pixels per clock. */
    	/* Buffer to drain stale data from PSI-L endpoint */
    	struct {
    		void			*vaddr;
    		dma_addr_t		paddr;
    		size_t			len;
    	} drain;
    	struct completion drain_complete;	/* Signals drain transfer done. */
    };
    
    /*
     * Table of capture formats supported by the driver.  Each entry maps a
     * V4L2 fourcc to its media-bus code, CSI-2 data type and SHIM unpack
     * size.
     *
     * Fix vs. original: two accidental zero-filled entries ("}, {" pairs
     * with no initializers) and a duplicated V4L2_PIX_FMT_GREY entry are
     * removed.  The empty entries had fourcc == 0 / code == 0, were
     * exposed to user space through VIDIOC_ENUM_FMT, and could be matched
     * by find_format_by_code(0).
     */
    static const struct ti_csi2rx_fmt ti_csi2rx_formats[] = {
    	{
    		.fourcc			= V4L2_PIX_FMT_SBGGR8,
    		.code			= MEDIA_BUS_FMT_SBGGR8_1X8,
    		.csi_dt			= MIPI_CSI2_DT_USER_DEFINED(0),
    		.bpp			= 8,
    		.size			= SHIM_DMACNTX_SIZE_8,
    	}, {
    		.fourcc			= V4L2_PIX_FMT_YUYV,
    		.code			= MEDIA_BUS_FMT_YUYV8_1X16,
    		.csi_dt			= MIPI_CSI2_DT_YUV422_8B,
    		.bpp			= 16,
    		.size			= SHIM_DMACNTX_SIZE_8,
    	}, {
    		.fourcc			= V4L2_PIX_FMT_UYVY,
    		.code			= MEDIA_BUS_FMT_UYVY8_1X16,
    		.csi_dt			= MIPI_CSI2_DT_YUV422_8B,
    		.bpp			= 16,
    		.size			= SHIM_DMACNTX_SIZE_8,
    	}, {
    		.fourcc			= V4L2_PIX_FMT_YVYU,
    		.code			= MEDIA_BUS_FMT_YVYU8_1X16,
    		.csi_dt			= MIPI_CSI2_DT_YUV422_8B,
    		.bpp			= 16,
    		.size			= SHIM_DMACNTX_SIZE_8,
    	}, {
    		.fourcc			= V4L2_PIX_FMT_VYUY,
    		.code			= MEDIA_BUS_FMT_VYUY8_1X16,
    		.csi_dt			= MIPI_CSI2_DT_YUV422_8B,
    		.bpp			= 16,
    		.size			= SHIM_DMACNTX_SIZE_8,
    	}, {
    		.fourcc			= V4L2_PIX_FMT_SBGGR8,
    		.code			= MEDIA_BUS_FMT_SBGGR8_1X8,
    		.csi_dt			= MIPI_CSI2_DT_RAW8,
    		.bpp			= 8,
    		.size			= SHIM_DMACNTX_SIZE_8,
    	}, {
    		.fourcc			= V4L2_PIX_FMT_SGBRG8,
    		.code			= MEDIA_BUS_FMT_SGBRG8_1X8,
    		.csi_dt			= MIPI_CSI2_DT_RAW8,
    		.bpp			= 8,
    		.size			= SHIM_DMACNTX_SIZE_8,
    	}, {
    		.fourcc			= V4L2_PIX_FMT_SGRBG8,
    		.code			= MEDIA_BUS_FMT_SGRBG8_1X8,
    		.csi_dt			= MIPI_CSI2_DT_RAW8,
    		.bpp			= 8,
    		.size			= SHIM_DMACNTX_SIZE_8,
    	}, {
    		.fourcc			= V4L2_PIX_FMT_SRGGB8,
    		.code			= MEDIA_BUS_FMT_SRGGB8_1X8,
    		.csi_dt			= MIPI_CSI2_DT_RAW8,
    		.bpp			= 8,
    		.size			= SHIM_DMACNTX_SIZE_8,
    	}, {
    		.fourcc			= V4L2_PIX_FMT_GREY,
    		.code			= MEDIA_BUS_FMT_Y8_1X8,
    		.csi_dt			= MIPI_CSI2_DT_RAW8,
    		.bpp			= 8,
    		.size			= SHIM_DMACNTX_SIZE_8,
    	}, {
    		.fourcc			= V4L2_PIX_FMT_SBGGR10,
    		.code			= MEDIA_BUS_FMT_SBGGR10_1X10,
    		.csi_dt			= MIPI_CSI2_DT_RAW10,
    		.bpp			= 16,
    		.size			= SHIM_DMACNTX_SIZE_16,
    	}, {
    		.fourcc			= V4L2_PIX_FMT_SGBRG10,
    		.code			= MEDIA_BUS_FMT_SGBRG10_1X10,
    		.csi_dt			= MIPI_CSI2_DT_RAW10,
    		.bpp			= 16,
    		.size			= SHIM_DMACNTX_SIZE_16,
    	}, {
    		.fourcc			= V4L2_PIX_FMT_SGRBG10,
    		.code			= MEDIA_BUS_FMT_SGRBG10_1X10,
    		.csi_dt			= MIPI_CSI2_DT_RAW10,
    		.bpp			= 16,
    		.size			= SHIM_DMACNTX_SIZE_16,
    	}, {
    		.fourcc			= V4L2_PIX_FMT_SRGGB10,
    		.code			= MEDIA_BUS_FMT_SRGGB10_1X10,
    		.csi_dt			= MIPI_CSI2_DT_RAW10,
    		.bpp			= 16,
    		.size			= SHIM_DMACNTX_SIZE_16,
    	}, {
    		.fourcc			= V4L2_PIX_FMT_SBGGR12,
    		.code			= MEDIA_BUS_FMT_SBGGR12_1X12,
    		.csi_dt			= MIPI_CSI2_DT_RAW12,
    		.bpp			= 16,
    		.size			= SHIM_DMACNTX_SIZE_16,
    	}, {
    		.fourcc			= V4L2_PIX_FMT_SGBRG12,
    		.code			= MEDIA_BUS_FMT_SGBRG12_1X12,
    		.csi_dt			= MIPI_CSI2_DT_RAW12,
    		.bpp			= 16,
    		.size			= SHIM_DMACNTX_SIZE_16,
    	}, {
    		.fourcc			= V4L2_PIX_FMT_SGRBG12,
    		.code			= MEDIA_BUS_FMT_SGRBG12_1X12,
    		.csi_dt			= MIPI_CSI2_DT_RAW12,
    		.bpp			= 16,
    		.size			= SHIM_DMACNTX_SIZE_16,
    	}, {
    		.fourcc			= V4L2_PIX_FMT_SRGGB12,
    		.code			= MEDIA_BUS_FMT_SRGGB12_1X12,
    		.csi_dt			= MIPI_CSI2_DT_RAW12,
    		.bpp			= 16,
    		.size			= SHIM_DMACNTX_SIZE_16,
    	}, {
    		.fourcc			= V4L2_PIX_FMT_SRGGI10,
    		.code			= MEDIA_BUS_FMT_SRGGI10_1X10,
    		.csi_dt			= MIPI_CSI2_DT_RAW10,
    		.bpp			= 16,
    		.size			= SHIM_DMACNTX_SIZE_16,
    	}, {
    		.fourcc			= V4L2_PIX_FMT_SGRIG10,
    		.code			= MEDIA_BUS_FMT_SGRIG10_1X10,
    		.csi_dt			= MIPI_CSI2_DT_RAW10,
    		.bpp			= 16,
    		.size			= SHIM_DMACNTX_SIZE_16,
    	}, {
    		.fourcc			= V4L2_PIX_FMT_SBGGI10,
    		.code			= MEDIA_BUS_FMT_SBGGI10_1X10,
    		.csi_dt			= MIPI_CSI2_DT_RAW10,
    		.bpp			= 16,
    		.size			= SHIM_DMACNTX_SIZE_16,
    	}, {
    		.fourcc			= V4L2_PIX_FMT_SGBIG10,
    		.code			= MEDIA_BUS_FMT_SGBIG10_1X10,
    		.csi_dt			= MIPI_CSI2_DT_RAW10,
    		.bpp			= 16,
    		.size			= SHIM_DMACNTX_SIZE_16,
    	}, {
    		.fourcc			= V4L2_PIX_FMT_SGIRG10,
    		.code			= MEDIA_BUS_FMT_SGIRG10_1X10,
    		.csi_dt			= MIPI_CSI2_DT_RAW10,
    		.bpp			= 16,
    		.size			= SHIM_DMACNTX_SIZE_16,
    	}, {
    		.fourcc			= V4L2_PIX_FMT_SIGGR10,
    		.code			= MEDIA_BUS_FMT_SIGGR10_1X10,
    		.csi_dt			= MIPI_CSI2_DT_RAW10,
    		.bpp			= 16,
    		.size			= SHIM_DMACNTX_SIZE_16,
    	}, {
    		.fourcc			= V4L2_PIX_FMT_SGIBG10,
    		.code			= MEDIA_BUS_FMT_SGIBG10_1X10,
    		.csi_dt			= MIPI_CSI2_DT_RAW10,
    		.bpp			= 16,
    		.size			= SHIM_DMACNTX_SIZE_16,
    	}, {
    		.fourcc			= V4L2_PIX_FMT_SIGGB10,
    		.code			= MEDIA_BUS_FMT_SIGGB10_1X10,
    		.csi_dt			= MIPI_CSI2_DT_RAW10,
    		.bpp			= 16,
    		.size			= SHIM_DMACNTX_SIZE_16,
    	},
    
    	/* More formats can be supported but they are not listed for now. */
    };
    
    extern int cdns_csi2rx_negotiate_ppc(struct v4l2_subdev *subdev,
    				     unsigned int pad, u8 *ppc);
    
    /* Forward declaration needed by ti_csi2rx_dma_callback. */
    static int ti_csi2rx_start_dma(struct ti_csi2rx_ctx *ctx,
    			       struct ti_csi2rx_buffer *buf);
    
    /* Forward declarations needed by ti_csi2rx_drain_callback. */
    static int ti_csi2rx_drain_dma(struct ti_csi2rx_ctx *ctx);
    static int ti_csi2rx_dma_submit_pending(struct ti_csi2rx_ctx *ctx);
    
    /* Look up a format entry by its V4L2 fourcc; NULL if unsupported. */
    static const struct ti_csi2rx_fmt *find_format_by_fourcc(u32 pixelformat)
    {
    	const struct ti_csi2rx_fmt *fmt = ti_csi2rx_formats;
    	const struct ti_csi2rx_fmt *end = fmt + ARRAY_SIZE(ti_csi2rx_formats);
    
    	for (; fmt < end; fmt++) {
    		if (fmt->fourcc == pixelformat)
    			return fmt;
    	}
    
    	return NULL;
    }
    
    /* Look up a format entry by its media-bus code; NULL if unsupported. */
    static const struct ti_csi2rx_fmt *find_format_by_code(u32 code)
    {
    	const struct ti_csi2rx_fmt *fmt = ti_csi2rx_formats;
    	const struct ti_csi2rx_fmt *end = fmt + ARRAY_SIZE(ti_csi2rx_formats);
    
    	for (; fmt < end; fmt++) {
    		if (fmt->code == code)
    			return fmt;
    	}
    
    	return NULL;
    }
    
    /*
     * Clamp the user-requested width/height and fill in the fixed parts of
     * the pixel format (type, fourcc, colorspace, sizeimage).
     *
     * The stock driver rounds the width down to a whole number of 16-byte
     * PSI-L words and 16-aligns bytesperline; both steps are deliberately
     * disabled here (see the HACK notes) to allow non-16-aligned widths.
     *
     * Fix vs. original: the local 'bpl' was computed but never used (its
     * only consumer was the commented-out bytesperline assignment),
     * causing a set-but-unused warning; the dead computation is removed.
     * NOTE(review): bytesperline is never written by this function —
     * confirm user space does not rely on it being set.
     */
    static void ti_csi2rx_fill_fmt(const struct ti_csi2rx_fmt *csi_fmt,
    			       struct v4l2_format *v4l2_fmt)
    {
    	struct v4l2_pix_format *pix = &v4l2_fmt->fmt.pix;
    	unsigned int pixels_in_word;
    	u8 bpp = csi_fmt->bpp;
    
    	/* Number of pixels that fit in one 16-byte PSI-L word. */
    	pixels_in_word = PSIL_WORD_SIZE_BYTES * 8 / bpp;
    
    	pix->width = clamp_t(unsigned int, pix->width,
    			     pixels_in_word,
    			     MAX_WIDTH_BYTES * 8 / bpp);
    
    	/* HACK: Allow non-16-aligned width; stock driver did:
    	 * pix->width = rounddown(pix->width, pixels_in_word);
    	 */
    
    	pix->height = clamp_t(unsigned int, pix->height, 1, MAX_HEIGHT_LINES);
    
    	v4l2_fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    	pix->pixelformat = csi_fmt->fourcc;
    	pix->colorspace = V4L2_COLORSPACE_SRGB;
    	pix->sizeimage = pix->height * pix->width * (bpp / 8);
    
    	/* HACK: Allow non-16-aligned width; stock driver did:
    	 * pix->bytesperline = ALIGN((pix->width * ALIGN(bpp, 8)) >> 3, 16);
    	 */
    }
    
    /* VIDIOC_QUERYCAP: report driver and card name (both the module name). */
    static int ti_csi2rx_querycap(struct file *file, void *priv,
    			      struct v4l2_capability *cap)
    {
    	const char *name = TI_CSI2RX_MODULE_NAME;
    
    	strscpy(cap->driver, name, sizeof(cap->driver));
    	strscpy(cap->card, name, sizeof(cap->card));
    
    	return 0;
    }
    
    /*
     * VIDIOC_ENUM_FMT: enumerate supported capture pixel formats.  When a
     * media-bus code filter is given, formats map 1:1 so only index 0 can
     * match; otherwise the table is enumerated by index.
     */
    static int ti_csi2rx_enum_fmt_vid_cap(struct file *file, void *priv,
    				      struct v4l2_fmtdesc *f)
    {
    	const struct ti_csi2rx_fmt *fmt;
    
    	if (f->mbus_code) {
    		/* Bus-code filter: at most one matching pixel format. */
    		if (f->index > 0)
    			return -EINVAL;
    		fmt = find_format_by_code(f->mbus_code);
    	} else {
    		if (f->index >= ARRAY_SIZE(ti_csi2rx_formats))
    			return -EINVAL;
    		fmt = &ti_csi2rx_formats[f->index];
    	}
    
    	if (!fmt)
    		return -EINVAL;
    
    	f->pixelformat = fmt->fourcc;
    	memset(f->reserved, 0, sizeof(f->reserved));
    	f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    
    	return 0;
    }
    
    /* VIDIOC_G_FMT: return the context's currently configured format. */
    static int ti_csi2rx_g_fmt_vid_cap(struct file *file, void *priv,
    				   struct v4l2_format *f)
    {
    	struct ti_csi2rx_ctx *ctx = video_drvdata(file);
    
    	*f = ctx->v_fmt;
    
    	return 0;
    }
    
    /*
     * VIDIOC_TRY_FMT: validate and adjust a requested capture format
     * without applying it.  Unsupported fourccs fall back to the first
     * table entry.  Always returns 0.
     *
     * Fix vs. original: the "interlaced" comment and the field assignment
     * were indented as if inside the preceding if-body, but (unbraced)
     * they always execute; they are re-indented at statement level to
     * match the actual control flow.
     */
    static int ti_csi2rx_try_fmt_vid_cap(struct file *file, void *priv,
    				     struct v4l2_format *f)
    {
    	const struct ti_csi2rx_fmt *fmt;
    
    	/*
    	 * Default to the first format if the requested pixel format code isn't
    	 * supported.
    	 */
    	fmt = find_format_by_fourcc(f->fmt.pix.pixelformat);
    	if (!fmt)
    		fmt = &ti_csi2rx_formats[0];
    
    	/* Interlaced formats are not supported. */
    	f->fmt.pix.field = V4L2_FIELD_NONE;
    
    	ti_csi2rx_fill_fmt(fmt, f);
    
    	return 0;
    }
    
    /*
     * VIDIOC_S_FMT: apply a capture format.  Rejected while the queue owns
     * buffers; otherwise the request is run through TRY_FMT and the
     * adjusted result becomes the context's active format.
     */
    static int ti_csi2rx_s_fmt_vid_cap(struct file *file, void *priv,
    				   struct v4l2_format *f)
    {
    	struct ti_csi2rx_ctx *ctx = video_drvdata(file);
    	int ret;
    
    	/* The format cannot change while buffers are allocated. */
    	if (vb2_is_busy(&ctx->vidq))
    		return -EBUSY;
    
    	ret = ti_csi2rx_try_fmt_vid_cap(file, priv, f);
    	if (ret < 0)
    		return ret;
    
    	ctx->v_fmt = *f;
    
    	return 0;
    }
    
/*
 * VIDIOC_ENUM_FRAMESIZES handler: report the stepwise frame-size range
 * supported for a given pixel format. Only index 0 is valid since a
 * single continuous range is reported.
 */
static int ti_csi2rx_enum_framesizes(struct file *file, void *fh,
				     struct v4l2_frmsizeenum *fsize)
{
	const struct ti_csi2rx_fmt *fmt;
	unsigned int pixels_in_word;
	u8 bpp;

	fmt = find_format_by_fourcc(fsize->pixel_format);
	if (!fmt || fsize->index != 0)
		return -EINVAL;

	/* Round bits-per-pixel up to a whole number of bytes. */
	bpp = ALIGN(fmt->bpp, 8);

	/*
	 * Number of pixels in one PSI-L word. The transfer happens in multiples
	 * of PSI-L word sizes.
	 */
	pixels_in_word = PSIL_WORD_SIZE_BYTES * 8 / bpp;

	fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
	fsize->stepwise.min_width = pixels_in_word;
	fsize->stepwise.max_width = rounddown(MAX_WIDTH_BYTES * 8 / bpp,
					      pixels_in_word);

	// HACK: Set step width to 1 to allow non-16 aligned transfers
	fsize->stepwise.step_width = 1;
	fsize->stepwise.min_height = 1;
	fsize->stepwise.max_height = MAX_HEIGHT_LINES;
	fsize->stepwise.step_height = 1;

	return 0;
}
    
/* V4L2 ioctl handlers for the per-context capture video node. */
static const struct v4l2_ioctl_ops csi_ioctl_ops = {
	.vidioc_querycap      = ti_csi2rx_querycap,
	.vidioc_enum_fmt_vid_cap = ti_csi2rx_enum_fmt_vid_cap,
	.vidioc_try_fmt_vid_cap = ti_csi2rx_try_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap = ti_csi2rx_g_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap = ti_csi2rx_s_fmt_vid_cap,
	.vidioc_enum_framesizes = ti_csi2rx_enum_framesizes,
	/* Buffer/stream control is delegated to videobuf2 helpers. */
	.vidioc_reqbufs       = vb2_ioctl_reqbufs,
	.vidioc_create_bufs   = vb2_ioctl_create_bufs,
	.vidioc_prepare_buf   = vb2_ioctl_prepare_buf,
	.vidioc_querybuf      = vb2_ioctl_querybuf,
	.vidioc_qbuf          = vb2_ioctl_qbuf,
	.vidioc_dqbuf         = vb2_ioctl_dqbuf,
	.vidioc_expbuf        = vb2_ioctl_expbuf,
	.vidioc_streamon      = vb2_ioctl_streamon,
	.vidioc_streamoff     = vb2_ioctl_streamoff,
};
    
/* File operations for the capture video node; I/O is handled by vb2. */
static const struct v4l2_file_operations csi_fops = {
	.owner = THIS_MODULE,
	.open = v4l2_fh_open,
	.release = vb2_fop_release,
	.read = vb2_fop_read,
	.poll = vb2_fop_poll,
	.unlocked_ioctl = video_ioctl2,
	.mmap = vb2_fop_mmap,
};
    
/*
 * Async notifier "bound" callback: remember the source subdev (the CSI-2
 * bridge) so later code can call into it.
 */
static int csi_async_notifier_bound(struct v4l2_async_notifier *notifier,
				    struct v4l2_subdev *subdev,
				    struct v4l2_async_connection *asc)
{
	struct ti_csi2rx_dev *csi = dev_get_drvdata(notifier->v4l2_dev->dev);

	csi->source = subdev;

	return 0;
}
    
    static int csi_async_notifier_complete(struct v4l2_async_notifier *notifier)
    {
    	struct ti_csi2rx_dev *csi = dev_get_drvdata(notifier->v4l2_dev->dev);
    	int ret, i;
    
    	/* Create link from source to subdev */
    	ret = v4l2_create_fwnode_links_to_pad(csi->source,
    					      &csi->pads[TI_CSI2RX_PAD_SINK],
    					      MEDIA_LNK_FL_IMMUTABLE |
    					      MEDIA_LNK_FL_ENABLED);
    	if (ret)
    		return ret;
    
    /* Create and link video nodes for all DMA contexts */
    	for (i = 0; i < csi->num_ctx; i++) {
    		struct ti_csi2rx_ctx *ctx = &csi->ctx[i];
    		struct video_device *vdev = &ctx->vdev;
    
    		ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
    		if (ret)
    			goto unregister_dev;
    
    		ret = media_create_pad_link(&csi->subdev.entity,
    					    TI_CSI2RX_PAD_FIRST_SOURCE + ctx->idx,
    					    &vdev->entity, 0,
    					    MEDIA_LNK_FL_IMMUTABLE |
    					    MEDIA_LNK_FL_ENABLED);
    		if (ret) {
    			video_unregister_device(vdev);
    			goto unregister_dev;
    		}
    	}
    
    	ret = v4l2_device_register_subdev_nodes(&csi->v4l2_dev);
    if (ret)
    		goto unregister_dev;
    
    	return 0;
    
    unregister_dev:
    	i--;
    	for (; i >= 0; i--) {
    		media_entity_remove_links(&csi->ctx[i].vdev.entity);
    		video_unregister_device(&csi->ctx[i].vdev);
    	}
    	return ret;
    }
    
/* Async notifier callbacks: capture the source subdev, then wire it up. */
static const struct v4l2_async_notifier_operations csi_async_notifier_ops = {
	.bound = csi_async_notifier_bound,
	.complete = csi_async_notifier_complete,
};
    
    static int ti_csi2rx_notifier_register(struct ti_csi2rx_dev *csi)
    {
    	struct fwnode_handle *fwnode;
    	struct v4l2_async_connection *asc;
    	struct device_node *node;
    	int ret;
    
    	node = of_get_child_by_name(csi->dev->of_node, "csi-bridge");
    	if (!node)
    		return -EINVAL;
    
    	fwnode = of_fwnode_handle(node);
    	if (!fwnode) {
    		of_node_put(node);
    		return -EINVAL;
    	}
    
    	v4l2_async_nf_init(&csi->notifier, &csi->v4l2_dev);
    	csi->notifier.ops = &csi_async_notifier_ops;
    
    	asc = v4l2_async_nf_add_fwnode(&csi->notifier, fwnode,
    				       struct v4l2_async_connection);
    	of_node_put(node);
    	if (IS_ERR(asc)) {
    		v4l2_async_nf_cleanup(&csi->notifier);
    		return PTR_ERR(asc);
    	}
    
    	ret = v4l2_async_nf_register(&csi->notifier);
    	if (ret) {
    		v4l2_async_nf_cleanup(&csi->notifier);
    		return ret;
    	}
    
    	return 0;
    }
    
/*
 * Request maximum possible pixels per clock from the bridge.
 *
 * Negotiates TI_CSI2RX_MAX_PIX_PER_CLK with the Cadence bridge and caches
 * the granted value in csi->pix_per_clk; falls back to 1 pixel per clock
 * when negotiation fails.
 */
static void ti_csi2rx_request_max_ppc(struct ti_csi2rx_dev *csi)
{
	struct media_pad *pad;
	int ret;
	u8 ppc = TI_CSI2RX_MAX_PIX_PER_CLK;

	pad = media_entity_remote_source_pad_unique(&csi->subdev.entity);
	if (!pad)
		return;

	/* ppc is passed by pointer — presumably updated by the bridge with
	 * the granted value; confirm against cdns_csi2rx_negotiate_ppc(). */
	ret = cdns_csi2rx_negotiate_ppc(csi->source, pad->index, &ppc);
	if (ret) {
		dev_warn(csi->dev, "NUM_PIXELS negotiation failed: %d\n", ret);
		csi->pix_per_clk = 1;
	} else {
		csi->pix_per_clk = ppc;
	}
}
    
    static void ti_csi2rx_setup_shim(struct ti_csi2rx_ctx *ctx)
    {
    	struct ti_csi2rx_dev *csi = ctx->csi;
    	const struct ti_csi2rx_fmt *fmt;
    	unsigned int reg;
    
    	fmt = find_format_by_fourcc(ctx->v_fmt.fmt.pix.pixelformat);
    	
    	/*
    	 * De-assert the pixel interface reset. Negotiate pixel count before
    	 * starting first stream on source
    	 */
    	mutex_lock(&csi->mutex);
    		if (!csi->enable_count) {
    		reg = SHIM_CNTL_PIX_RST;
    		writel(reg, csi->shim + SHIM_CNTL);
    	ti_csi2rx_request_max_ppc(csi);
    	}
    	mutex_unlock(&csi->mutex);
    	
    	reg = SHIM_DMACNTX_EN;
    	reg |= FIELD_PREP(SHIM_DMACNTX_FMT, fmt->csi_dt);
    
    	/*
    	 * The hardware assumes incoming YUV422 8-bit data on MIPI CSI2 bus
    	 * follows the spec and is packed in the order U0 -> Y0 -> V0 -> Y1 ->
    	 * ...
    	 *
    	 * There is an option to swap the bytes around before storing in
    	 * memory, to achieve different pixel formats:
    	 *
    	 * Byte3 <----------- Byte0
    	 * [ Y1 ][ V0 ][ Y0 ][ U0 ]	MODE 11
    	 * [ Y1 ][ U0 ][ Y0 ][ V0 ]	MODE 10
    	 * [ V0 ][ Y1 ][ U0 ][ Y0 ]	MODE 01
    	 * [ U0 ][ Y1 ][ V0 ][ Y0 ]	MODE 00
    	 *
    	 * We don't have any requirement to change pixelformat from what is
    	 * coming from the source, so we keep it in MODE 11, which does not
    	 * swap any bytes when storing in memory.
    	 */
    	switch (fmt->fourcc) {
    	case V4L2_PIX_FMT_UYVY:
    	case V4L2_PIX_FMT_VYUY:
    	case V4L2_PIX_FMT_YUYV:
    	case V4L2_PIX_FMT_YVYU:
    		reg |= FIELD_PREP(SHIM_DMACNTX_YUV422,
    					SHIM_DMACNTX_YUV422_MODE_11);
    	/* Multiple pixels are handled differently for packed YUV */
    		if (csi->pix_per_clk == 2)
    			reg |= SHIM_DMACNTX_DUAL_PCK_CFG;
    		reg |= FIELD_PREP(SHIM_DMACNTX_SIZE, fmt->size);
    		break;
    	default:
    		/* By default we change the shift size for multiple pixels */
    	reg |= FIELD_PREP(SHIM_DMACNTX_SIZE,
    				  fmt->size + (csi->pix_per_clk >> 1));
    				break;
    	}
    
    	reg |= FIELD_PREP(SHIM_DMACNTX_VC, ctx->vc);
    
    	writel(reg, csi->shim + SHIM_DMACNTX(ctx->idx));
    
    	reg = FIELD_PREP(SHIM_PSI_CFG0_SRC_TAG, 0) |
    	      FIELD_PREP(SHIM_PSI_CFG0_DST_TAG, 0);
    	writel(reg, csi->shim + SHIM_PSI_CFG0(ctx->idx));
    }
    
/*
 * Completion callback for a drain transaction.
 *
 * When the context is stopping (TI_CSI2RX_DMA_STOPPED) it disables the
 * SHIM DMA context and signals drain completion to ti_csi2rx_stop_dma().
 * Otherwise it either queues another drain transfer (no user buffer has
 * arrived yet) or kicks off the pending user buffers.
 */
static void ti_csi2rx_drain_callback(void *param)
{
	struct ti_csi2rx_ctx *ctx = param;
	struct ti_csi2rx_dev *csi = ctx->csi;
	struct ti_csi2rx_dma *dma = &ctx->dma;
	unsigned long flags;

	spin_lock_irqsave(&dma->lock, flags);

	if (dma->state == TI_CSI2RX_DMA_STOPPED) {
		/* Stop path: disable this context and wake the waiter. */
		writel(0, csi->shim + SHIM_DMACNTX(ctx->idx));
		complete(&csi->drain_complete);

		spin_unlock_irqrestore(&dma->lock, flags);
		return;
	}

	/*
	 * If dma->queue is empty, it signals no buffer has arrived from
	 * user space, so, queue more transaction to drain dma
	 */
	if (list_empty(&dma->queue)) {
		if (ti_csi2rx_drain_dma(ctx))
			dev_warn(ctx->csi->dev, "DMA drain failed\n");
	} else {
		ti_csi2rx_dma_submit_pending(ctx);
	}
	spin_unlock_irqrestore(&dma->lock, flags);
}
    
    /*
     * Drain the stale data left at the PSI-L endpoint.
     *
     * This might happen if no buffers are queued in time but source is still
     * streaming. In multi-stream scenarios this can happen when one stream is
     * stopped but other is still streaming, and thus module-level pixel reset is
     * not asserted.
     *
     * To prevent that stale data corrupting the subsequent transactions, it is
     * required to issue DMA requests to drain it out.
     */
    static int ti_csi2rx_drain_dma(struct ti_csi2rx_ctx *ctx)
    {
    	struct ti_csi2rx_dev *csi = ctx->csi;
    	struct dma_async_tx_descriptor *desc;
    		dma_cookie_t cookie;
    	int ret;
    
    	desc = dmaengine_prep_slave_single(ctx->dma.chan, csi->drain.paddr,
    					   csi->drain.len, DMA_DEV_TO_MEM,
    					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
    	if (!desc) {
    		ret = -EIO;
    		goto out;
    	}
    
    	desc->callback = ti_csi2rx_drain_callback;
    	desc->callback_param = ctx;
    
    	cookie = dmaengine_submit(desc);
    	ret = dma_submit_error(cookie);
    	if (ret)
    		goto out;
    
    	dma_async_issue_pending(ctx->dma.chan);
    
    	out:
    	return ret;
    }
    
/*
 * Start DMA for every buffer userspace has queued, moving each from
 * dma->queue to dma->submitted. Called with dma->lock held. Returns 0 on
 * success or the first submission error; the failing buffer is returned
 * to vb2 with VB2_BUF_STATE_ERROR and the remaining ones stay queued.
 */
static int ti_csi2rx_dma_submit_pending(struct ti_csi2rx_ctx *ctx)
{
	struct ti_csi2rx_dma *dma = &ctx->dma;
	struct ti_csi2rx_buffer *buf;
	int ret = 0;

	/* If there are more buffers to process then start their transfer. */
	while (!list_empty(&dma->queue)) {
		buf = list_entry(dma->queue.next, struct ti_csi2rx_buffer, list);
		ret = ti_csi2rx_start_dma(ctx, buf);
		if (ret) {
			dev_err(ctx->csi->dev,
				"Failed to queue the next buffer for DMA\n");
			vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
			break;
		}
		list_move_tail(&buf->list, &dma->submitted);
	}
	return ret;
}
    
    static void ti_csi2rx_dma_callback(void *param,
    				   const struct dmaengine_result *result)
    {
    	struct ti_csi2rx_buffer *buf = param;
    	struct ti_csi2rx_ctx *ctx = buf->ctx;
    	struct ti_csi2rx_dma *dma = &ctx->dma;
    	unsigned long flags;
    
    	/*
    	 * TODO: Derive the sequence number from the CSI2RX frame number
    	 * hardware monitor registers.
    	 */
    	buf->vb.vb2_buf.timestamp = ktime_get_ns();
    	buf->vb.sequence = ctx->sequence++;
    
    	spin_lock_irqsave(&dma->lock, flags);
    
    	WARN_ON(!list_is_first(&buf->list, &dma->submitted));
    	if (0 == (ctx->ok_frame_cnt + ctx->ng_frame_cnt))
    	{
     		ctx->rx_start = ktime_get();
    	}
    
    	if (result && (result->result != 5) & (result->result != DMA_TRANS_NOERROR || result->residue != 0))
    	{
    #if 0 // The log may bring something can't restored unless reboot the system, according to Qingfeng's test.
    		dev_err(ctx->csi->dev, "Failed DMA transfer for frame#%u of (stream: %d idx: %d, vc: %d),timestamp: %lld result=%d, residue=%u\n",
    			buf->vb.sequence, ctx->stream, ctx->idx, ctx->vc, buf->vb.vb2_buf.timestamp, result->result, result->residue
    			);
    #endif
    		ctx->ng_frame_cnt++;
    		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
    	} else {
    		ctx->ok_frame_cnt++;
    		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
    	}
    	list_del(&buf->list);
    
    	ti_csi2rx_dma_submit_pending(ctx);
    
    	if (list_empty(&dma->submitted)) {
    		if (ti_csi2rx_drain_dma(ctx))
    			dev_warn(ctx->csi->dev,
    				"DMA drain failed on one of the transactions\n");
    	}
    	spin_unlock_irqrestore(&dma->lock, flags);
    }
    
    static int ti_csi2rx_start_dma(struct ti_csi2rx_ctx *ctx,
    			       struct ti_csi2rx_buffer *buf)
    {
    	unsigned long addr;
    	struct dma_async_tx_descriptor *desc;
    	size_t len = ctx->v_fmt.fmt.pix.sizeimage;
    	dma_cookie_t cookie;
    	int ret = 0;
    	struct sg_table *sg;
    
    	// addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
    	// desc = dmaengine_prep_slave_single(ctx->dma.chan, addr, len,
    	// 				   DMA_DEV_TO_MEM,
    	// 				   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
    
    	sg = vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0);
    	/*dev_err(ctx->csi->dev, "%s: Got scatter gather list with nents %d", __func__, sg->nents);*/
    	desc = dmaengine_prep_slave_sg(ctx->dma.chan, sg->sgl, sg->nents,
    				       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT |
    				       DMA_CTRL_ACK);
    	if (!desc)
    		return -EIO;
    
    	desc->callback_result  = ti_csi2rx_dma_callback;
    	desc->callback_param = buf;
    
    	cookie = dmaengine_submit(desc);
    	ret = dma_submit_error(cookie);
    	if (ret)
    		return ret;
    
    	dma_async_issue_pending(ctx->dma.chan);
    
    	return 0;
    }
    
/*
 * Stop the context's DMA channel.
 *
 * Marks the context STOPPED, issues a final drain (the module-level pixel
 * reset cannot be asserted while other contexts stream), waits for the
 * drain callback to disable the SHIM DMA context, then terminates the
 * dmaengine channel.
 */
static void ti_csi2rx_stop_dma(struct ti_csi2rx_ctx *ctx)
{
	struct ti_csi2rx_dma *dma = &ctx->dma;
	struct ti_csi2rx_dev *csi = ctx->csi;
	enum ti_csi2rx_dma_state state;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dma->lock, flags);
	state = ctx->dma.state;
	dma->state = TI_CSI2RX_DMA_STOPPED;
	spin_unlock_irqrestore(&dma->lock, flags);

	/*
	 * NOTE(review): drain_complete lives on the shared csi device, not
	 * the per-context dma struct — stopping two of the four contexts
	 * concurrently would share a single completion. Confirm whether
	 * concurrent stream-off of multiple channels is possible here.
	 */
	init_completion(&csi->drain_complete);

	if (state != TI_CSI2RX_DMA_STOPPED) {
		/*
		 * Normal DMA termination does not clean up pending data on
		 * the endpoint if multiple streams are running and only one
		 * is stopped, as the module-level pixel reset cannot be
		 * enforced before terminating DMA.
		 */
		ret = ti_csi2rx_drain_dma(ctx);
		if (ret)
			dev_warn(ctx->csi->dev,
				 "Failed to drain DMA. Next frame might be bogus\n");
	}

	/* Wait for the drain callback to disable the SHIM context. */
	if (!wait_for_completion_timeout(&csi->drain_complete,
					 msecs_to_jiffies(DRAIN_TIMEOUT_MS)))
		dev_dbg(csi->dev, "DMA transfer timed out for drain buffer\n");

	ret = dmaengine_terminate_sync(ctx->dma.chan);
	if (ret)
		dev_err(ctx->csi->dev, "Failed to stop DMA: %d\n", ret);
}
    
    static void ti_csi2rx_cleanup_buffers(struct ti_csi2rx_ctx *ctx,
    				      enum vb2_buffer_state state)
    {
    	struct ti_csi2rx_dma *dma = &ctx->dma;
    	struct ti_csi2rx_buffer *buf, *tmp;
    		unsigned long flags;
    	
    	spin_lock_irqsave(&dma->lock, flags);
    	list_for_each_entry_safe(buf, tmp, &ctx->dma.queue, list) {
    		list_del(&buf->list);
    		vb2_buffer_done(&buf->vb.vb2_buf, state);
    	}
    	list_for_each_entry_safe(buf, tmp, &ctx->dma.submitted, list) {
    		list_del(&buf->list);
    		vb2_buffer_done(&buf->vb.vb2_buf, state);
    	}
    	spin_unlock_irqrestore(&dma->lock, flags);
    }
    
    static int ti_csi2rx_queue_setup(struct vb2_queue *q, unsigned int *nbuffers,
    				 unsigned int *nplanes, unsigned int sizes[],
    				 struct device *alloc_devs[])
    {
    	struct ti_csi2rx_ctx *ctx = vb2_get_drv_priv(q);
    	unsigned int size = ctx->v_fmt.fmt.pix.sizeimage;
    
    	if (*nplanes) {
    		if (sizes[0] < size)
    			return -EINVAL;
    		size = sizes[0];
    	}
    
    	*nplanes = 1;
    	sizes[0] = size;
    
    	return 0;
    }
    
/*
 * vb2 buf_prepare: reject planes smaller than the configured image size
 * and set the payload to exactly one frame.
 */
static int ti_csi2rx_buffer_prepare(struct vb2_buffer *vb)
{
	struct ti_csi2rx_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	unsigned long size = ctx->v_fmt.fmt.pix.sizeimage;

	if (vb2_plane_size(vb, 0) < size) {
		dev_err(ctx->csi->dev, "Data will not fit into plane\n");
		return -EINVAL;
	}

	vb2_set_plane_payload(vb, 0, size);
	return 0;
}
    
    static void ti_csi2rx_buffer_queue(struct vb2_buffer *vb)
    {
    	struct ti_csi2rx_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
    	struct ti_csi2rx_buffer *buf;
    	struct ti_csi2rx_dma *dma = &ctx->dma;
    		unsigned long flags = 0;
    	
    	buf = container_of(vb, struct ti_csi2rx_buffer, vb.vb2_buf);
    	buf->ctx = ctx;
    
    	spin_lock_irqsave(&dma->lock, flags);
    			list_add_tail(&buf->list, &dma->queue);
    				spin_unlock_irqrestore(&dma->lock, flags);
    }
    
    static int ti_csi2rx_get_vc(struct ti_csi2rx_ctx *ctx)
    {
    	struct ti_csi2rx_dev *csi = ctx->csi;
    	struct v4l2_mbus_frame_desc fd;
    	struct media_pad *pad;
    	int ret, i;
    
    	pad = media_entity_remote_pad_unique(&csi->subdev.entity, MEDIA_PAD_FL_SOURCE);
    	if (!pad)
    		return -ENODEV;
    
    	ret = v4l2_subdev_call(csi->source, pad, get_frame_desc, pad->index,
    			       &fd);
    	if (ret)
    		return ret;
    
    	if (fd.type != V4L2_MBUS_FRAME_DESC_TYPE_CSI2)
    		return -EINVAL;
    
    	for (i = 0; i < fd.num_entries; i++) {
    		if (ctx->stream == fd.entry[i].stream)
    			return fd.entry[i].bus.csi2.vc;
    	}
    
    	return -ENODEV;
    }
    
/*
 * vb2 start_streaming: power up, start the media pipeline, resolve this
 * context's route and CSI-2 virtual channel, program the SHIM, submit the
 * buffers userspace already queued, and finally enable the source stream.
 */
static int ti_csi2rx_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct ti_csi2rx_ctx *ctx = vb2_get_drv_priv(vq);
	struct ti_csi2rx_dev *csi = ctx->csi;
	struct ti_csi2rx_dma *dma = &ctx->dma;
	struct v4l2_subdev_krouting *routing;
	struct v4l2_subdev_route *route = NULL;
	struct media_pad *remote_pad;
	unsigned long flags;
	int ret = 0, i;
	struct v4l2_subdev_state *state;

	ret = pm_runtime_resume_and_get(csi->dev);
	if (ret)
		return ret;

	/* Refuse to start with nothing queued: DMA would have no target. */
	spin_lock_irqsave(&dma->lock, flags);
	if (list_empty(&dma->queue))
		ret = -EIO;
	spin_unlock_irqrestore(&dma->lock, flags);
	if (ret)
		return ret;

	ret = video_device_pipeline_start(&ctx->vdev, &csi->pipe);
	if (ret)
		goto err;

	remote_pad = media_entity_remote_source_pad_unique(ctx->pad.entity);
	if (!remote_pad) {
		ret = -ENODEV;
		goto err;
	}

	state = v4l2_subdev_lock_and_get_active_state(&csi->subdev);

	routing = &state->routing;

	/* Find the stream to process. */
	for (i = 0; i < routing->num_routes; i++) {
		struct v4l2_subdev_route *r = &routing->routes[i];

		if (!(r->flags & V4L2_SUBDEV_ROUTE_FL_ACTIVE))
			continue;

		if (r->source_pad != remote_pad->index)
			continue;

		route = r;
		break;
	}

	if (!route) {
		ret = -ENODEV;
		v4l2_subdev_unlock_state(state);
		goto err;
	}

	ctx->stream = route->sink_stream;

	v4l2_subdev_unlock_state(state);

	/* -ENOIOCTLCMD means the source cannot report VCs; default to 0. */
	ret = ti_csi2rx_get_vc(ctx);
	if (ret == -ENOIOCTLCMD)
		ctx->vc = 0;
	else if (ret < 0)
		goto err;
	else
		ctx->vc = ret;

	ti_csi2rx_setup_shim(ctx);

	ctx->sequence = 0;

	spin_lock_irqsave(&dma->lock, flags);

	ret = ti_csi2rx_dma_submit_pending(ctx);
	if (ret) {
		spin_unlock_irqrestore(&dma->lock, flags);
				goto err_dma;
	}

	dma->state = TI_CSI2RX_DMA_ACTIVE;
	spin_unlock_irqrestore(&dma->lock, flags);

	/* Start stream 0, we don't allow multiple streams on the source pad */
	ret = v4l2_subdev_enable_streams(&csi->subdev,
					 TI_CSI2RX_PAD_FIRST_SOURCE + ctx->idx,
					 BIT(0));
	if (ret)
		goto err_dma;

	return 0;

err_dma:
    ti_csi2rx_stop_dma(ctx);
	video_device_pipeline_stop(&ctx->vdev);
	/*
	 * NOTE(review): this asserts the module-level pixel reset
	 * (SHIM_CNTL = 0) unconditionally — if other contexts are still
	 * streaming, their pixel data path is reset too. Confirm whether
	 * this should be gated on csi->enable_count as in stop_streaming.
	 */
	writel(0, csi->shim + SHIM_CNTL);
	writel(0, csi->shim + SHIM_DMACNTX(ctx->idx));
err:
	ti_csi2rx_cleanup_buffers(ctx, VB2_BUF_STATE_QUEUED);
	pm_runtime_put(csi->dev);

	return ret;
}
    
/*
 * vb2 stop_streaming: assert the module pixel reset when this is the last
 * active context, stop the pipeline and the DMA, disable the source
 * stream and release all remaining buffers as errored.
 */
static void ti_csi2rx_stop_streaming(struct vb2_queue *vq)
{
	struct ti_csi2rx_ctx *ctx = vb2_get_drv_priv(vq);
	struct ti_csi2rx_dev *csi = ctx->csi;
	int ret;

	/* assert pixel reset to prevent stale data */
	/*
	 * NOTE(review): csi->enable_count is read here without csi->mutex,
	 * which protects it everywhere else (enable/disable_streams,
	 * setup_shim). A concurrent start/stop on another of the four
	 * contexts can race this check — worth confirming when chasing the
	 * multi-channel frame-loss/desync issue.
	 */
	if (csi->enable_count == 1) {
		writel(0, csi->shim + SHIM_DMACNTX(ctx->idx));
		writel(0, csi->shim + SHIM_CNTL);
	}

	video_device_pipeline_stop(&ctx->vdev);
	ti_csi2rx_stop_dma(ctx);

	ret = v4l2_subdev_disable_streams(&csi->subdev,
					  TI_CSI2RX_PAD_FIRST_SOURCE + ctx->idx,
					  BIT(0));

	if (ret)
		dev_err(csi->dev, "Failed to stop subdev stream\n");

	ti_csi2rx_cleanup_buffers(ctx, VB2_BUF_STATE_ERROR);
	pm_runtime_put(csi->dev);
}
    
/* videobuf2 queue operations for the capture video node. */
static const struct vb2_ops csi_vb2_qops = {
	.queue_setup = ti_csi2rx_queue_setup,
	.buf_prepare = ti_csi2rx_buffer_prepare,
	.buf_queue = ti_csi2rx_buffer_queue,
	.start_streaming = ti_csi2rx_start_streaming,
	.stop_streaming = ti_csi2rx_stop_streaming,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
};
    
/* Map the embedded subdev pointer back to its owning ti_csi2rx_dev. */
static inline struct ti_csi2rx_dev *to_csi2rx_dev(struct v4l2_subdev *sd)
{
	return container_of(sd, struct ti_csi2rx_dev, subdev);
}
    
    static int ti_csi2rx_sd_set_fmt(struct v4l2_subdev *sd,
    				struct v4l2_subdev_state *state,
    				struct v4l2_subdev_format *format)
    {
    	struct v4l2_mbus_framefmt *fmt;
    	int ret = 0;
    
    	/* No transcoding, don't allow setting source fmt */
    	if (format->pad >= TI_CSI2RX_PAD_FIRST_SOURCE)
    		return v4l2_subdev_get_fmt(sd, state, format);
    
    	if (!find_format_by_code(format->format.code))
    		format->format.code = ti_csi2rx_formats[0].code;
    
    	format->format.field = V4L2_FIELD_NONE;
    
    	fmt = v4l2_subdev_state_get_stream_format(state, format->pad, format->stream);
    	if (!fmt) {
    		ret = -EINVAL;
    		goto out;
    	}
    	*fmt = format->format;
    
    	fmt = v4l2_subdev_state_get_opposite_stream_format(state, format->pad,
    							   format->stream);
    	if (!fmt) {
    		ret = -EINVAL;
    		goto out;
    	}
    	*fmt = format->format;
    
    out:
    	return ret;
    }
    
    static int _ti_csi2rx_sd_set_routing(struct v4l2_subdev *sd,
    				     struct v4l2_subdev_state *state,
    				     struct v4l2_subdev_krouting *routing)
    {
    	int ret;
    
    	const struct v4l2_mbus_framefmt format = {
    		.width = 640,
    		.height = 480,
    		.code = MEDIA_BUS_FMT_UYVY8_1X16,
    		.field = V4L2_FIELD_NONE,
    		.colorspace = V4L2_COLORSPACE_SRGB,
    		.ycbcr_enc = V4L2_YCBCR_ENC_601,
    		.quantization = V4L2_QUANTIZATION_LIM_RANGE,
    		.xfer_func = V4L2_XFER_FUNC_SRGB,
    	};
    
    	ret = v4l2_subdev_routing_validate(sd, routing,
    					   V4L2_SUBDEV_ROUTING_ONLY_1_TO_1 |
    					   V4L2_SUBDEV_ROUTING_NO_SOURCE_MULTIPLEXING);
    
    	if (ret)
    		return ret;
    
    	ret = v4l2_subdev_set_routing_with_fmt(sd, state, routing, &format);
    
    	return ret;
    }
    
/*
 * Subdev set_routing entry point. TRY and ACTIVE are handled identically,
 * so the "which" argument is intentionally unused.
 */
static int ti_csi2rx_sd_set_routing(struct v4l2_subdev *sd,
				    struct v4l2_subdev_state *state,
				    enum v4l2_subdev_format_whence which,
				    struct v4l2_subdev_krouting *routing)
{
	return _ti_csi2rx_sd_set_routing(sd, state, routing);
}
    
/*
 * Subdev init_cfg: set up the default state with a single active route
 * from the sink pad to the first source pad.
 */
static int ti_csi2rx_sd_init_cfg(struct v4l2_subdev *sd,
				 struct v4l2_subdev_state *state)
{
	struct v4l2_subdev_route routes[] = { {
		.sink_pad = 0,
		.sink_stream = 0,
		.source_pad = TI_CSI2RX_PAD_FIRST_SOURCE,
		.source_stream = 0,
		.flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE,
	} };

	struct v4l2_subdev_krouting routing = {
		.num_routes = 1,
		.routes = routes,
	};

	/* Initialize routing to a single route to the first source pad. */
	return _ti_csi2rx_sd_set_routing(sd, state, &routing);
}
    
    static int ti_csi2rx_sd_all_sink_streams(struct v4l2_subdev_state *state)
    {
    	struct v4l2_subdev_krouting *routing = &state->routing;
    	u64 sink_streams = 0;
    	int i;
    
    	for (i = 0; i < routing->num_routes; i++) {
    		struct v4l2_subdev_route *r = &routing->routes[i];
    
    		if (r->sink_pad == TI_CSI2RX_PAD_SINK)
    			sink_streams |= BIT(r->sink_stream);
    	}
    
    	return sink_streams;
    }
    
/*
 * Subdev enable_streams, serialized by csi->mutex.
 *
 * Single-stream mode: the first enabled context turns on all routed sink
 * streams at once. Multi-stream mode (set via ADTOF_ENABLE_STREAM_NUM):
 * each enable turns on one additional source stream.
 */
static int ti_csi2rx_sd_enable_streams(struct v4l2_subdev *sd,
				       struct v4l2_subdev_state *state,
				       u32 pad, u64 streams_mask)
{
	struct ti_csi2rx_dev *csi = to_csi2rx_dev(sd);
	struct media_pad *remote_pad;
	int ret = 0;

	remote_pad = media_entity_remote_source_pad_unique(&csi->subdev.entity);
	if (!remote_pad)
		return -ENODEV;

	mutex_lock(&csi->mutex);

	if (csi->enable_multi_stream) {
		/*
		 * NOTE(review): the source stream bit is derived from
		 * enable_count — i.e. from the order streams are started —
		 * not from this context's route or the passed streams_mask.
		 * If the four channels are not started (and stopped) in a
		 * fixed order, contexts and source streams end up
		 * mismatched; worth confirming while debugging the
		 * channel-desync/frame-loss issue.
		 */
		dev_dbg(csi->dev, "Enabling pad: %d, streams_mask: %lld.\n", pad, streams_mask);
		ret = v4l2_subdev_enable_streams(csi->source, remote_pad->index, BIT(csi->enable_count));
		if (ret)
			goto out;

		csi->enabled_streams_mask |= BIT(csi->enable_count);
	} else {
		/* First context enables every routed sink stream at once. */
		if (!csi->enable_count) {
			u64 sink_streams;
	
			sink_streams = ti_csi2rx_sd_all_sink_streams(state);
			dev_dbg(csi->dev, "Enabling all streams (%llx) on sink.\n",
				sink_streams);
			ret = v4l2_subdev_enable_streams(csi->source, remote_pad->index,
							 sink_streams);
			if (ret)
				goto out;
			csi->enabled_streams_mask = sink_streams;
		}
	}

	csi->enable_count++;
out:
	mutex_unlock(&csi->mutex);
	return ret;
}
    
/*
 * Subdev disable_streams, serialized by csi->mutex.
 *
 * Single-stream mode: the last active context turns off all routed sink
 * streams. Multi-stream mode: the highest-numbered enabled source stream
 * is turned off, and the mode flag is cleared once the last stream stops.
 */
static int ti_csi2rx_sd_disable_streams(struct v4l2_subdev *sd,
					struct v4l2_subdev_state *state,
					u32 pad, u64 streams_mask)
{
	struct ti_csi2rx_dev *csi = to_csi2rx_dev(sd);
	struct media_pad *remote_pad;
	int ret = 0;

	remote_pad = media_entity_remote_source_pad_unique(&csi->subdev.entity);
	if (!remote_pad)
		return -ENODEV;

	mutex_lock(&csi->mutex);
	if (csi->enable_count == 0) {
		ret = -EINVAL;
		goto out;
	}

	if (csi->enable_multi_stream) {
		/*
		 * NOTE(review): BIT(enable_count - 1) assumes streams stop
		 * in strict LIFO order of their start; stopping a channel
		 * out of order disables a different channel's source
		 * stream. Confirm against the userspace start/stop
		 * sequence.
		 */
		ret = v4l2_subdev_disable_streams(csi->source, remote_pad->index, BIT(csi->enable_count - 1));
		if (ret)
			goto out;

		csi->enabled_streams_mask &= ~(BIT(csi->enable_count - 1));
		if (csi->enable_count == 1) {
			csi->enable_multi_stream = false;
		}
	} else {
		/* Last active context disables all sink streams at once. */
		if (csi->enable_count == 1) {
			u64 sink_streams;

			sink_streams = ti_csi2rx_sd_all_sink_streams(state);
			dev_dbg(csi->dev, "Disabling all streams (%llx) on sink.\n",
				sink_streams);
			ret = v4l2_subdev_disable_streams(csi->source, remote_pad->index,
							  sink_streams);
			if (ret)
				goto out;
			csi->enabled_streams_mask = 0;
		}
	}

	--csi->enable_count;
out:
	mutex_unlock(&csi->mutex);
	return ret;
}
    
    static long ti_csi2rx_sd_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
    {
    	struct ti_csi2rx_dev *csi = to_csi2rx_dev(sd);
    	int ret = 0;
    
    	switch (cmd) {
    	case ADTOF_ENABLE_STREAM_NUM:
    		mutex_lock(&csi->mutex);
    		if (*((u32 *)arg) > 0)
    			csi->enable_multi_stream = true;
    
    		dev_err(csi->dev, "enable multi stream: %u from %d.\n", csi->enable_multi_stream, *((u32 *)arg));
    		mutex_unlock(&csi->mutex);
    		break;
    
    	default:
    		return -ENOIOCTLCMD;
    	}
    
    	return ret;
    }
    
/* Core subdev ops: only the private ADTOF ioctl is implemented. */
static const struct v4l2_subdev_core_ops ti_csi2rx_subdev_core_ops =
{
	.ioctl = ti_csi2rx_sd_ioctl, 
};
    
/* Pad ops: routing, format and per-stream enable/disable handling. */
static const struct v4l2_subdev_pad_ops ti_csi2rx_subdev_pad_ops = {
	.init_cfg = ti_csi2rx_sd_init_cfg,
	.set_routing = ti_csi2rx_sd_set_routing,
	.get_fmt = v4l2_subdev_get_fmt,
	.set_fmt = ti_csi2rx_sd_set_fmt,
	.enable_streams = ti_csi2rx_sd_enable_streams,
	.disable_streams = ti_csi2rx_sd_disable_streams,
};
    
/* Top-level subdev operations for the bridge. */
static const struct v4l2_subdev_ops ti_csi2rx_subdev_ops = {
	.core = &ti_csi2rx_subdev_core_ops, 
	.pad = &ti_csi2rx_subdev_pad_ops,
};
    
/* Tear down the v4l2/media registration done at probe time. */
static void ti_csi2rx_cleanup_v4l2(struct ti_csi2rx_dev *csi)
{
	v4l2_subdev_cleanup(&csi->subdev);
	media_device_unregister(&csi->mdev);
	v4l2_device_unregister(&csi->v4l2_dev);
	media_device_cleanup(&csi->mdev);
}
    
/* Unregister and free the async notifier set up for the csi-bridge. */
static void ti_csi2rx_cleanup_notifier(struct ti_csi2rx_dev *csi)
{
	v4l2_async_nf_unregister(&csi->notifier);
	v4l2_async_nf_cleanup(&csi->notifier);
}
    
/*
 * Release all per-context resources: DMA channel, vb2 queue, video node
 * and the context mutex.
 */
static void ti_csi2rx_cleanup_ctx(struct ti_csi2rx_ctx *ctx)
{
	/*
	 * NOTE(review): the channel is only released when the device is not
	 * runtime-suspended — presumably it was already released on the
	 * runtime-suspend path; confirm against the PM callbacks.
	 */
	if (!pm_runtime_status_suspended(ctx->csi->dev))
		dma_release_channel(ctx->dma.chan);

	vb2_queue_release(&ctx->vidq);

	video_unregister_device(&ctx->vdev);

	mutex_destroy(&ctx->mutex);
}
    
/*
 * Initialize the vb2 queue for one context. This build uses the
 * scatter-gather allocator (vb2_dma_sg_memops) rather than dma-contig,
 * and additionally enables USERPTR I/O — see the commented-out original
 * settings below.
 */
static int ti_csi2rx_init_vb2q(struct ti_csi2rx_ctx *ctx)
{
	struct vb2_queue *q = &ctx->vidq;
	int ret;

	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	//q->io_modes = VB2_MMAP | VB2_DMABUF;
	q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_USERPTR;
	q->drv_priv = ctx;
	q->buf_struct_size = sizeof(struct ti_csi2rx_buffer);
	q->ops = &csi_vb2_qops;
	//q->mem_ops = &vb2_dma_contig_memops;
	q->mem_ops = &vb2_dma_sg_memops;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	/* Buffers must be mapped for the DMA engine's device, not ours. */
	q->dev = dmaengine_get_dma_device(ctx->dma.chan);
	q->lock = &ctx->mutex;
	q->min_buffers_needed = 1;
	q->allow_cache_hints = 1;

	ret = vb2_queue_init(q);
	if (ret)
		return ret;

	ctx->vdev.queue = q;

	return 0;
}
    
    static int ti_csi2rx_link_validate_get_fmt(struct media_pad *pad,
    					   struct v4l2_subdev_format *fmt)
    {
    	if (is_media_entity_v4l2_subdev(pad->entity)) {
    		struct v4l2_subdev *sd =
    			media_entity_to_v4l2_subdev(pad->entity);
    
    		fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
    		fmt->pad = pad->index;
    		return v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
    	}
    
    	WARN(pad->entity->function != MEDIA_ENT_F_IO_V4L,
    	     "Driver bug! Wrong media entity type 0x%08x, entity %s\n",
    	     pad->entity->function, pad->entity->name);
    
    	return -EINVAL;
    }
    
/*
 * Validate the subdev -> video node link: width, height, field and media
 * bus code on the subdev source pad must match the context's configured
 * capture format. Returns -EPIPE on any mismatch.
 */
static int ti_csi2rx_link_validate(struct media_link *link)
{
	struct media_entity *entity = link->sink->entity;
	struct video_device *vdev = media_entity_to_video_device(entity);
	struct ti_csi2rx_ctx *ctx = container_of(vdev, struct ti_csi2rx_ctx, vdev);
	struct ti_csi2rx_dev *csi = ctx->csi;
	struct v4l2_pix_format *csi_fmt = &ctx->v_fmt.fmt.pix;
	struct v4l2_subdev_format source_fmt;
	const struct ti_csi2rx_fmt *ti_fmt;
	int ret;

	ret = ti_csi2rx_link_validate_get_fmt(link->source, &source_fmt);
	if (ret)
		return ret;

	if (source_fmt.format.width != csi_fmt->width) {
		dev_err(csi->dev, "Width does not match (source %u, sink %u)\n",
			source_fmt.format.width, csi_fmt->width);
		return -EPIPE;
	}

	if (source_fmt.format.height != csi_fmt->height) {
		dev_err(csi->dev, "Height does not match (source %u, sink %u)\n",
			source_fmt.format.height, csi_fmt->height);
		return -EPIPE;
	}

	/* An interlaced sink would accept any source field setting. */
	if (source_fmt.format.field != csi_fmt->field &&
	    csi_fmt->field != V4L2_FIELD_NONE) {
		dev_err(csi->dev, "Field does not match (source %u, sink %u)\n",
			source_fmt.format.field, csi_fmt->field);
		return -EPIPE;
	}

	ti_fmt = find_format_by_code(source_fmt.format.code);
	if (!ti_fmt) {
		dev_err(csi->dev, "Media bus format 0x%x not supported\n",
			source_fmt.format.code);
		return -EPIPE;
	}

	/* The bridge cannot transcode: fourcc must map 1-to-1. */
	if (ctx->v_fmt.fmt.pix.pixelformat != ti_fmt->fourcc) {
		dev_err(csi->dev,
			"Cannot transform source fmt 0x%x to sink fmt 0x%x\n",
			ctx->v_fmt.fmt.pix.pixelformat, ti_fmt->fourcc);
		return -EPIPE;
	}

	return 0;
}
    
/*
 * Entity ops for the per-context video devices. Currently unused: the
 * assignment in ti_csi2rx_init_ctx() is commented out, so link
 * validation is not performed on video-device links.
 */
static const struct media_entity_operations ti_csi2rx_video_entity_ops = {
	.link_validate = ti_csi2rx_link_validate,
};
    
/* Entity ops for the bridge subdev: use the generic subdev validation. */
static const struct media_entity_operations ti_csi2rx_subdev_entity_ops = {
	.link_validate = v4l2_subdev_link_validate,
};
    
    static int ti_csi2rx_init_dma(struct ti_csi2rx_ctx *ctx)
    {
    	struct dma_slave_config cfg = {
    		.src_addr_width = DMA_SLAVE_BUSWIDTH_16_BYTES,
    	};
    	char name[32];
    	int ret;
    
    	snprintf(name, sizeof(name), "rx%u", ctx->idx);
    	ctx->dma.chan = dma_request_chan(ctx->csi->dev, name);
    	if (IS_ERR(ctx->dma.chan))
    		return PTR_ERR(ctx->dma.chan);
    
    	ret = dmaengine_slave_config(ctx->dma.chan, &cfg);
    	if (ret) {
    		dma_release_channel(ctx->dma.chan);
    		return ret;
    	}
    
    	return 0;
    }
    
    static int ti_csi2rx_v4l2_init(struct ti_csi2rx_dev *csi)
    {
    	struct media_device *mdev = &csi->mdev;
    	struct v4l2_subdev *sd = &csi->subdev;
    	int ret, i;
    
    	mdev->dev = csi->dev;
    	mdev->hw_revision = 1;
    	strscpy(mdev->model, "TI-CSI2RX", sizeof(mdev->model));
    
    	media_device_init(mdev);
    
    	csi->v4l2_dev.mdev = mdev;
    
    	ret = v4l2_device_register(csi->dev, &csi->v4l2_dev);
    	if (ret)
    		goto cleanup_media;
    
    	ret = media_device_register(mdev);
    	if (ret)
    		goto unregister_v4l2;
    
    	v4l2_subdev_init(sd, &ti_csi2rx_subdev_ops);
    	sd->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
    	sd->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_STREAMS;
    	strscpy(sd->name, dev_name(csi->dev), sizeof(sd->name));
    	sd->dev = csi->dev;
    sd->entity.ops = &ti_csi2rx_subdev_entity_ops;
    
    	csi->pads[TI_CSI2RX_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
    
    	for (unsigned int i = TI_CSI2RX_PAD_FIRST_SOURCE;
    	     i < TI_CSI2RX_PAD_FIRST_SOURCE + csi->num_ctx; i++)
    		csi->pads[i].flags = MEDIA_PAD_FL_SOURCE;
    
    	ret = media_entity_pads_init(&sd->entity,
    				     TI_CSI2RX_PAD_FIRST_SOURCE + csi->num_ctx,
    				     csi->pads);
    	if (ret)
    		goto unregister_media;
    
    	ret = v4l2_subdev_init_finalize(sd);
    	if (ret)
    		goto unregister_media;
    
    	ret = v4l2_device_register_subdev(&csi->v4l2_dev, sd);
    	if (ret)
    		goto cleanup_subdev;
    
    	return 0;
    
    cleanup_subdev:
    	v4l2_subdev_cleanup(sd);
    unregister_media:
    	media_device_unregister(mdev);
    unregister_v4l2:
    	v4l2_device_unregister(&csi->v4l2_dev);
    cleanup_media:
    	media_device_cleanup(mdev);
    
    	return ret;
    }
    
    static int ti_csi2rx_init_ctx(struct ti_csi2rx_ctx *ctx)
    {
    	struct ti_csi2rx_dev *csi = ctx->csi;
    	struct video_device *vdev = &ctx->vdev;
    	const struct ti_csi2rx_fmt *fmt;
    	struct v4l2_pix_format *pix_fmt = &ctx->v_fmt.fmt.pix;
    	int ret;
    
    	mutex_init(&ctx->mutex);
    
    	fmt = find_format_by_fourcc(V4L2_PIX_FMT_UYVY);
    	if (!fmt)
    		return -EINVAL;
    
    	pix_fmt->width = 640;
    	pix_fmt->height = 480;
    	pix_fmt->field = V4L2_FIELD_NONE;
    	pix_fmt->colorspace = V4L2_COLORSPACE_SRGB;
    	pix_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601,
    	pix_fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE,
    	pix_fmt->xfer_func = V4L2_XFER_FUNC_SRGB,
    
    	ti_csi2rx_fill_fmt(fmt, &ctx->v_fmt);
    
    	ctx->pad.flags = MEDIA_PAD_FL_SINK;
    	//vdev->entity.ops = &ti_csi2rx_video_entity_ops;
    	ret = media_entity_pads_init(&ctx->vdev.entity, 1, &ctx->pad);
    	if (ret)
    		return ret;
    
    	snprintf(vdev->name, sizeof(vdev->name), "%s context %u",
    		 dev_name(csi->dev), ctx->idx);
    	vdev->v4l2_dev = &csi->v4l2_dev;
    	vdev->vfl_dir = VFL_DIR_RX;
    	vdev->fops = &csi_fops;
    	vdev->ioctl_ops = &csi_ioctl_ops;
    	vdev->release = video_device_release_empty;
    	vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
    			    V4L2_CAP_IO_MC;
    	vdev->lock = &ctx->mutex;
    	video_set_drvdata(vdev, ctx);
    
    	INIT_LIST_HEAD(&ctx->dma.queue);
    	INIT_LIST_HEAD(&ctx->dma.submitted);
    	spin_lock_init(&ctx->dma.lock);
    	ctx->dma.state = TI_CSI2RX_DMA_STOPPED;
    
    	ret = ti_csi2rx_init_dma(ctx);
    	if (ret)
    		return ret;
    
    	ret = ti_csi2rx_init_vb2q(ctx);
    	if (ret)
    		goto cleanup_dma;
    
    	return 0;
    
    cleanup_dma:
    	dma_release_channel(ctx->dma.chan);
    	return ret;
    }
    
    #ifdef CONFIG_PM
/*
 * System suspend: assert the SHIM pixel reset, then for every context
 * disable its source stream (if it was running), stop its SHIM DMA
 * context, drain and terminate its DMA channel.
 *
 * Returns the last error seen, or 0; errors are logged but do not abort
 * the loop, so all contexts are always quiesced.
 */
static int ti_csi2rx_suspend(struct device *dev)
{
	struct ti_csi2rx_dev *csi = dev_get_drvdata(dev);
	enum ti_csi2rx_dma_state state;
	struct ti_csi2rx_ctx *ctx;
	struct ti_csi2rx_dma *dma;
	unsigned long flags = 0;
	int i, ret = 0;

	/* If device was not in use we can simply suspend */
	if (pm_runtime_status_suspended(dev))
		return 0;

	/*
	 * If device is running, assert the pixel reset to cleanly stop any
	 * on-going streams before we suspend.
	 */
	writel(0, csi->shim + SHIM_CNTL);

	for (i = 0; i < csi->num_ctx; i++) {
		ctx = &csi->ctx[i];
		dma = &ctx->dma;

		/* Snapshot the DMA state under its lock. */
		spin_lock_irqsave(&dma->lock, flags);
		state = dma->state;
		spin_unlock_irqrestore(&dma->lock, flags);

		if (state != TI_CSI2RX_DMA_STOPPED) {
			/* Disable source */
			ret = v4l2_subdev_disable_streams(&csi->subdev,
							  TI_CSI2RX_PAD_FIRST_SOURCE + ctx->idx,
							  BIT(0));
			if (ret)
				dev_err(csi->dev, "Failed to stop subdev stream\n");
		}

		/* Stop any on-going streams */
		writel(0, csi->shim + SHIM_DMACNTX(ctx->idx));

		/* Drain DMA */
		ti_csi2rx_drain_dma(ctx);

		/* Terminate DMA */
		ret = dmaengine_terminate_sync(ctx->dma.chan);
		if (ret)
			dev_err(csi->dev, "Failed to stop DMA\n");
	}

	return ret;
}
    
/*
 * System resume: release the SHIM pixel reset and, for every context
 * that was streaming at suspend time, resubmit its previously submitted
 * buffers, restore the SHIM DMA configuration and re-enable the source
 * stream.
 *
 * Returns the last error seen, or 0.
 */
static int ti_csi2rx_resume(struct device *dev)
{
	struct ti_csi2rx_dev *csi = dev_get_drvdata(dev);
	struct ti_csi2rx_ctx *ctx;
	struct ti_csi2rx_dma *dma;
	struct ti_csi2rx_buffer *buf;
	unsigned long flags = 0;
	unsigned int reg;
	int i, ret = 0;

	/* If device was not in use, we can simply wakeup */
	if (pm_runtime_status_suspended(dev))
		return 0;

	/* If device was in use before, restore all the running streams */
	reg = SHIM_CNTL_PIX_RST;
	writel(reg, csi->shim + SHIM_CNTL);

	for (i = 0; i < csi->num_ctx; i++) {
		ctx = &csi->ctx[i];
		dma = &ctx->dma;
		spin_lock_irqsave(&dma->lock, flags);
		if (dma->state != TI_CSI2RX_DMA_STOPPED) {
			/* Re-submit all previously submitted buffers to DMA */
			list_for_each_entry(buf, &ctx->dma.submitted, list) {
				ti_csi2rx_start_dma(ctx, buf);
			}
			spin_unlock_irqrestore(&dma->lock, flags);

			/* Restore stream config */
			ti_csi2rx_setup_shim(ctx);

			/*
			 * NOTE(review): the subdev is re-enabled outside the
			 * lock — confirm nothing can alter the submitted list
			 * between the unlock and the stream enable.
			 */
			ret = v4l2_subdev_enable_streams(&csi->subdev,
							 TI_CSI2RX_PAD_FIRST_SOURCE + ctx->idx,
							 BIT(0));
			if (ret)
				dev_err(ctx->csi->dev, "Failed to start subdev\n");
		} else {
			spin_unlock_irqrestore(&dma->lock, flags);
		}
	}

	return ret;
}
    
    static int ti_csi2rx_runtime_suspend(struct device *dev)
    {
    	struct ti_csi2rx_dev *csi = dev_get_drvdata(dev);
    	int i;
    
    	if (csi->enable_count != 0)
    		return -EBUSY;
    
    	for (i = 0; i < csi->num_ctx; i++)
    		dma_release_channel(csi->ctx[i].dma.chan);
    
    	return 0;
    }
    
    static int ti_csi2rx_runtime_resume(struct device *dev)
    {
    	struct ti_csi2rx_dev *csi = dev_get_drvdata(dev);
    	int ret, i;
    
    	for (i = 0; i < csi->num_ctx; i++) {
    		ret = ti_csi2rx_init_dma(&csi->ctx[i]);
    		if (ret)
    			return ret;
    	}
    
    	return 0;
    }
    
    static int ti_csi2rx_pm_notifier(struct notifier_block *nb, unsigned long action,
    			  void *data)
    {
    	struct ti_csi2rx_dev *csi =
    		container_of(nb, struct ti_csi2rx_dev, pm_notifier);
    
    	switch (action) {
    	case PM_HIBERNATION_PREPARE:
    	case PM_SUSPEND_PREPARE:
    	case PM_RESTORE_PREPARE:
    		ti_csi2rx_suspend(csi->dev);
    		break;
    	case PM_POST_SUSPEND:
    	case PM_POST_HIBERNATION:
    	case PM_POST_RESTORE:
    		ti_csi2rx_resume(csi->dev);
    		break;
    	}
    
    	return NOTIFY_DONE;
    }
    
/*
 * Runtime PM only; system sleep is handled via the PM notifier
 * registered in probe (ti_csi2rx_pm_notifier).
 */
static const struct dev_pm_ops ti_csi2rx_pm_ops = {
		SET_RUNTIME_PM_OPS(ti_csi2rx_runtime_suspend, ti_csi2rx_runtime_resume,
			   NULL)
};
    #endif /* CONFIG_PM */
    
    static ssize_t csi_rx_status_show(struct device *dev,
    				struct device_attribute *attr, char *buf)
    {
    	struct ti_csi2rx_dev *csi = dev_get_drvdata(dev);
    	int tmp_len = 0;
    	char tmp[512] = {0};
    	char *p_tmp;
    	int i;
    	s64 us_delta;
    	int total_fps = 0;
    	int ok_fps = 0;
    
    	p_tmp = tmp;
    	for (i = 0; i < csi->num_ctx; i++)
    	{
    		us_delta = ktime_us_delta(ktime_get(), csi->ctx[i].rx_start);
    		total_fps = (csi->ctx[i].ok_frame_cnt + csi->ctx[i].ng_frame_cnt) / (us_delta / 1000000);
    		ok_fps = (csi->ctx[i].ok_frame_cnt) / (us_delta / 1000000);
    		tmp_len = sprintf(p_tmp, "stream[%d] ok: %d, ng: %d, total_fps: %d, ok_fps: %d\n", 
    			i, csi->ctx[i].ok_frame_cnt, csi->ctx[i].ng_frame_cnt, total_fps, ok_fps);
    		p_tmp += tmp_len;
    	}
    
    	return scnprintf(buf, PAGE_SIZE, "%s\n", tmp);
    }
    
    static ssize_t csi_rx_status_store(struct device *dev,
    				struct device_attribute *attr,
    				const char *buf,
    				size_t count)
    {
    	struct ti_csi2rx_dev *csi = dev_get_drvdata(dev);
    	int i;
    	int ret = 0;
    	char cmd[20];
    
    	ret = sscanf(buf, "%s", cmd);
    	if (ret == 1 && 6 == count)
    	{
    		ret = memcmp(cmd,"Clear",5);
    		if (0 == ret)
    		{
    			for (i = 0; i < csi->num_ctx; i++)
    			{
    				csi->ctx[i].ok_frame_cnt = 0;
    				csi->ctx[i].ng_frame_cnt = 0;
    			}
    			ret = 0; // avoid "write error: Operation not permitted"
    		}
    		else {
    			return -EINVAL;
    		}
    	}
    	else {
    		return -EINVAL;
    	}
    
    	return ret ? -1 : count;
    }
    
/* Read/write "csi_rx_status" attribute backed by the show/store above. */
static DEVICE_ATTR_RW(csi_rx_status);

/* sysfs attributes exposed on the platform device for frame statistics. */
static struct attribute *ti_csi2rx_dev_attrs[] = {
	&dev_attr_csi_rx_status.attr,
	NULL,
};

static const struct attribute_group ti_csi2rx_dev_attrs_group = {
	.attrs = ti_csi2rx_dev_attrs,
};
    
/* Create the frame-statistics sysfs group on @dev. */
static int ti_csi2rx_register_attrib_group(struct device *dev)
{
	return sysfs_create_group(&dev->kobj, &ti_csi2rx_dev_attrs_group);
}
    
    static int ti_csi2rx_probe(struct platform_device *pdev)
    {
    	struct device_node *np = pdev->dev.of_node;
    	struct ti_csi2rx_dev *csi;
    	int ret, i, count;
    
    	csi = devm_kzalloc(&pdev->dev, sizeof(*csi), GFP_KERNEL);
    	if (!csi)
    		return -ENOMEM;
    
    	csi->dev = &pdev->dev;
    	platform_set_drvdata(pdev, csi);
    
    	csi->shim = devm_platform_ioremap_resource(pdev, 0);
    	if (IS_ERR(csi->shim)) {
    		ret = PTR_ERR(csi->shim);
    		return ret;
    	}
    
    	csi->drain.len = DRAIN_BUFFER_SIZE;
    	csi->drain.vaddr = dma_alloc_coherent(csi->dev, csi->drain.len,
    					      &csi->drain.paddr,
    					      GFP_KERNEL);
    	if (!csi->drain.vaddr)
    		return -ENOMEM;
    
    	/* Only use as many contexts as the number of DMA channels allocated. */
    	count = of_property_count_strings(np, "dma-names");
    	if (count < 0) {
    		dev_err(csi->dev, "Failed to get DMA channel count: %d\n",
    			count);
    		return count;
    	}
    	csi->enable_multi_stream = false;
    	csi->num_ctx = count;
    	if (csi->num_ctx > TI_CSI2RX_MAX_CTX) {
    		dev_warn(csi->dev,
    			 "%u DMA channels passed. Maximum is %u. Ignoring the rest.\n",
    			 csi->num_ctx, TI_CSI2RX_MAX_CTX);
    		csi->num_ctx = TI_CSI2RX_MAX_CTX;
    	}
    
    	mutex_init(&csi->mutex);
    
    	ret = ti_csi2rx_v4l2_init(csi);
    	if (ret)
    		goto err_v4l2;
    
    	for (i = 0; i < csi->num_ctx; i++) {
    		csi->ctx[i].idx = i;
    		csi->ctx[i].csi = csi;
    		ret = ti_csi2rx_init_ctx(&csi->ctx[i]);
    		if (ret)
    			goto err_ctx;
    	}
    
    	ret = ti_csi2rx_notifier_register(csi);
    	if (ret)
    		goto err_ctx;
    
    	ret = of_platform_populate(csi->dev->of_node, NULL, NULL, csi->dev);
    	if (ret) {
    		dev_err(csi->dev, "Failed to create children: %d\n", ret);
    		goto err_notifier;
    	}
    
    	ret = ti_csi2rx_register_attrib_group(csi->dev);
    	if (ret) {
    		dev_err(&pdev->dev, "Error creating sysfs attribute group for j721e-csi2rx driver\n");
    	}
    	csi->pm_notifier.notifier_call = ti_csi2rx_pm_notifier;
    	ret = register_pm_notifier(&csi->pm_notifier);
    	if (ret) {
    		dev_err(csi->dev, "Failed to create PM notifier: %d\n", ret);
    		goto err_notifier;
    	}
    
    	pm_runtime_set_active(csi->dev);
    	pm_runtime_enable(csi->dev);
    	pm_request_idle(csi->dev);
    
    	return 0;
    
    err_notifier:
    	ti_csi2rx_cleanup_notifier(csi);
    err_ctx:
    	i--;
    	for (; i >= 0; i--)
    		ti_csi2rx_cleanup_ctx(&csi->ctx[i]);
    	ti_csi2rx_cleanup_v4l2(csi);
    err_v4l2:
    	mutex_destroy(&csi->mutex);
    	dma_free_coherent(csi->dev, csi->drain.len, csi->drain.vaddr,
    			  csi->drain.paddr);
    	return ret;
    }
    
    static int ti_csi2rx_remove(struct platform_device *pdev)
    {
    	struct ti_csi2rx_dev *csi = platform_get_drvdata(pdev);
    	unsigned int i;
    
    	for (i = 0; i < csi->num_ctx; i++)
    		ti_csi2rx_cleanup_ctx(&csi->ctx[i]);
    
    	unregister_pm_notifier(&csi->pm_notifier);
    	ti_csi2rx_cleanup_notifier(csi);
    	ti_csi2rx_cleanup_v4l2(csi);
    	mutex_destroy(&csi->mutex);
    	dma_free_coherent(csi->dev, csi->drain.len, csi->drain.vaddr,
    			  csi->drain.paddr);
    
    	pm_runtime_disable(&pdev->dev);
    	pm_runtime_set_suspended(&pdev->dev);
    	return 0;
    }
    
    static const struct of_device_id ti_csi2rx_of_match[] = {
    	{ .compatible = "ti,j721e-csi2rx-shim", },
    	{ },
    };
    MODULE_DEVICE_TABLE(of, ti_csi2rx_of_match);
    
/* Platform driver glue; PM ops are only wired in when CONFIG_PM is set. */
static struct platform_driver ti_csi2rx_pdrv = {
	.probe = ti_csi2rx_probe,
	.remove = ti_csi2rx_remove,
	.driver = {
		.name		= TI_CSI2RX_MODULE_NAME,
		.of_match_table	= ti_csi2rx_of_match,
#ifdef CONFIG_PM
		.pm		= &ti_csi2rx_pm_ops,
#endif
	},
};

module_platform_driver(ti_csi2rx_pdrv);

MODULE_DESCRIPTION("TI J721E CSI2 RX Driver");
MODULE_AUTHOR("Jai Luthra <j-luthra@ti.com>");
MODULE_LICENSE("GPL");
    

    // SPDX-License-Identifier: GPL-2.0+
    /*
     * Driver for Cadence MIPI-CSI2 RX Controller v1.3
     *
     * Copyright (C) 2017 Cadence Design Systems Inc.
     */
    
    #include <linux/clk.h>
    #include <linux/delay.h>
    #include <linux/io.h>
    #include <linux/iopoll.h>
    #include <linux/module.h>
    #include <linux/of.h>
    #include <linux/of_graph.h>
    #include <linux/phy/phy.h>
    #include <linux/platform_device.h>
    #include <linux/reset.h>
    #include <linux/slab.h>
    
    #include <media/v4l2-ctrls.h>
    #include <media/v4l2-device.h>
    #include <media/v4l2-fwnode.h>
    #include <media/v4l2-subdev.h>
    
    #define CSI2RX_DEVICE_CFG_REG			0x000
    
    #define CSI2RX_SOFT_RESET_REG			0x004
    #define CSI2RX_SOFT_RESET_PROTOCOL			BIT(1)
    #define CSI2RX_SOFT_RESET_FRONT				BIT(0)
    
    #define CSI2RX_STATIC_CFG_REG			0x008
    #define CSI2RX_STATIC_CFG_DLANE_MAP(llane, plane)	((plane) << (16 + (llane) * 4))
    #define CSI2RX_STATIC_CFG_LANES_MASK			GENMASK(11, 8)
    
    #define CSI2RX_DPHY_ERROR_BYPASS_CFG		0x10
    #define CSI2RX_ERROR_BYPASS_DATA_ID            BIT(2)
    #define CSI2RX_ERROR_BYPASS_ECC                        BIT(1)
    #define CSI2RX_ERROR_BYPASS_CRC                        BIT(0)
    
    
    #define CSI2RX_DPHY_MONITOR_IRQS			0x18
    #define CSI2RX_DPHY_MONITOR_IRQS_MASK_CFG	0x1c
    #define CSI2RX_DPHY_INFO_IRQS				0x20
    #define CSI2RX_DPHY_INFO_IRQS_MASK_CFG		0x24
    #define CSI2RX_DPHY_ERROR_IRQS				0x28
    #define CSI2RX_DPHY_ERROR_IRQS_MASK_CFG		0x2c
    #define CSI2RX_DPHY_DPHY_STATUS				0x48
    #define CSI2RX_DPHY_ERR_STATUS_IRQ			0x4c
    #define CSI2RX_DPHY_ERR_IRQ_MASK_CFG		0x50
    #define CSI2RX_INTEGRATION_DEBUG			0x60
    #define CSI2RX_DPHY_ERROR_DEBUG_REG			0x74
    
    #define CSI2RX_DPHY_LANE_CTRL_REG		0x40
    #define CSI2RX_DPHY_CL_RST			BIT(16)
    #define CSI2RX_DPHY_DL_RST(i)			BIT((i) + 12)
    #define CSI2RX_DPHY_CL_EN			BIT(4)
    #define CSI2RX_DPHY_DL_EN(i)			BIT(i)
    
    #define CSI2RX_STREAM_BASE(n)		(((n) + 1) * 0x100)
    
    #define CSI2RX_STREAM_CTRL_REG(n)		(CSI2RX_STREAM_BASE(n) + 0x000)
    #define CSI2RX_STREAM_CTRL_SOFT_RST			BIT(4)
    #define CSI2RX_STREAM_CTRL_STOP				BIT(1)
    #define CSI2RX_STREAM_CTRL_START			BIT(0)
    
    #define CSI2RX_STREAM_STATUS_REG(n)		(CSI2RX_STREAM_BASE(n) + 0x004)
    #define CSI2RX_STREAM_STATUS_RDY			BIT(31)
    
    #define CSI2RX_STREAM_DATA_CFG_REG(n)		(CSI2RX_STREAM_BASE(n) + 0x008)
    #define CSI2RX_STREAM_DATA_CFG_VC_SELECT(n)		BIT((n) + 16)
    #define CSI2RX_STREAM_DATA_CFG_VC_ALL			0
    
    #define CSI2RX_STREAM_CFG_REG(n)		(CSI2RX_STREAM_BASE(n) + 0x00c)
    #define CSI2RX_STREAM_CFG_FIFO_MODE_LARGE_BUF		(1 << 8)
    #define CSI2RX_STREAM_CFG_NUM_PIXELS_MASK		GENMASK(5, 4)
    #define CSI2RX_STREAM_CFG_NUM_PIXELS(n)			((n) >> 1)
    
    #define CSI2RX_LANES_MAX	4
    #define CSI2RX_STREAMS_MAX	4
    
    #define CSI2RX_ERROR_IRQS_REG			0x28
    #define CSI2RX_ERROR_IRQS_MASK_REG		0x2C
    
    #define CSI2RX_STREAM3_FIFO_OVERFLOW_IRQ	BIT(19)
    #define CSI2RX_STREAM2_FIFO_OVERFLOW_IRQ	BIT(18)
    #define CSI2RX_STREAM1_FIFO_OVERFLOW_IRQ	BIT(17)
    #define CSI2RX_STREAM0_FIFO_OVERFLOW_IRQ	BIT(16)
    #define CSI2RX_FRONT_TRUNC_HDR_IRQ		BIT(12)
    #define CSI2RX_PROT_TRUNCATED_PACKET_IRQ	BIT(11)
    #define CSI2RX_FRONT_LP_NO_PAYLOAD_IRQ		BIT(10)
    #define CSI2RX_SP_INVALID_RCVD_IRQ		BIT(9)
    #define CSI2RX_DATA_ID_IRQ			BIT(7)
    #define CSI2RX_HEADER_CORRECTED_ECC_IRQ	BIT(6)
    #define CSI2RX_HEADER_ECC_IRQ			BIT(5)
    #define CSI2RX_PAYLOAD_CRC_IRQ			BIT(4)
    
    #define CSI2RX_ECC_ERRORS		GENMASK(7, 4)
    #define CSI2RX_PACKET_ERRORS		GENMASK(12, 9)
    
    enum csi2rx_pads {
    	CSI2RX_PAD_SINK,
    	CSI2RX_PAD_SOURCE_STREAM0,
    	CSI2RX_PAD_SOURCE_STREAM1,
    	CSI2RX_PAD_SOURCE_STREAM2,
    	CSI2RX_PAD_SOURCE_STREAM3,
    	CSI2RX_PAD_MAX,
    };
    
/* Properties of one supported media bus code on the CSI-2 bus. */
struct csi2rx_fmt {
	u32				code;
	/* width of a single pixel on CSI-2 bus, in bits */
	u8				bpp;
	/* max pixels per clock supported on output bus */
	u8				max_pixels;
};
    
/* One error-IRQ bit and its human-readable description. */
struct csi2rx_event {
	u32 mask;
	const char *name;
};
    
/*
 * Error events counted by the IRQ handler; indexed in step with the
 * events[] counters in struct csi2rx_priv.
 */
static const struct csi2rx_event csi2rx_events[] = {
	{ CSI2RX_STREAM3_FIFO_OVERFLOW_IRQ, "Overflow of the Stream 3 FIFO detected" },
	{ CSI2RX_STREAM2_FIFO_OVERFLOW_IRQ, "Overflow of the Stream 2 FIFO detected" },
	{ CSI2RX_STREAM1_FIFO_OVERFLOW_IRQ, "Overflow of the Stream 1 FIFO detected" },
	{ CSI2RX_STREAM0_FIFO_OVERFLOW_IRQ, "Overflow of the Stream 0 FIFO detected" },
	{ CSI2RX_FRONT_TRUNC_HDR_IRQ, "A truncated header [short or long] has been received" },
	{ CSI2RX_PROT_TRUNCATED_PACKET_IRQ, "A truncated long packet has been received" },
	{ CSI2RX_FRONT_LP_NO_PAYLOAD_IRQ, "A truncated long packet has been received. No payload" },
	{ CSI2RX_SP_INVALID_RCVD_IRQ, "A reserved or invalid short packet has been received" },
	{ CSI2RX_DATA_ID_IRQ, "Data ID error in the header packet" },
	{ CSI2RX_HEADER_CORRECTED_ECC_IRQ, "ECC error detected and corrected" },
	{ CSI2RX_HEADER_ECC_IRQ, "Unrecoverable ECC error" },
	{ CSI2RX_PAYLOAD_CRC_IRQ, "CRC error" },
};
#define CSI2RX_NUM_EVENTS		ARRAY_SIZE(csi2rx_events)
/* Per-instance state of the Cadence CSI2RX controller. */
struct csi2rx_priv {
	struct device			*dev;
	unsigned int			count;
	int				error_irq;	/* < 0 when no error IRQ is wired */

	/*
	 * Used to prevent race conditions between multiple,
	 * concurrent calls to start and stop.
	 */
	struct mutex			lock;

	void __iomem			*base;
	struct clk			*sys_clk;
	struct clk			*p_clk;
	struct clk			*pixel_clk[CSI2RX_STREAMS_MAX];
	struct reset_control		*sys_rst;
	struct reset_control		*p_rst;
	struct reset_control		*pixel_rst[CSI2RX_STREAMS_MAX];
	struct phy			*dphy;	/* NULL when using internal D-PHY */

	/* Per-stream virtual-channel select mask written to DATA_CFG. */
	u32				vc_select[CSI2RX_STREAMS_MAX];
	u8				num_pixels[CSI2RX_STREAMS_MAX];
	u8				lanes[CSI2RX_LANES_MAX];	/* logical->physical lane map */
	u8				num_lanes;
	u8				max_lanes;
	u8				max_streams;
	bool				has_internal_dphy;
	/* Counters per csi2rx_events[] entry, bumped in the IRQ handler. */
	u32				events[CSI2RX_NUM_EVENTS];

	struct v4l2_subdev		subdev;
	struct v4l2_async_notifier	notifier;
	struct media_pad		pads[CSI2RX_PAD_MAX];

	/* Remote source */
	struct v4l2_subdev		*source_subdev;
	int				source_pad;
};
    
/* Media bus codes supported on the sink pad, with their CSI-2 bit widths. */
static const struct csi2rx_fmt formats[] = {
	{ .code	= MEDIA_BUS_FMT_YUYV8_1X16, .bpp = 16, },
	{ .code	= MEDIA_BUS_FMT_UYVY8_1X16, .bpp = 16, },
	{ .code	= MEDIA_BUS_FMT_YVYU8_1X16, .bpp = 16, },
	{ .code	= MEDIA_BUS_FMT_VYUY8_1X16, .bpp = 16, },
	{ .code	= MEDIA_BUS_FMT_SBGGR8_1X8, .bpp = 8, },
	{ .code	= MEDIA_BUS_FMT_SGBRG8_1X8, .bpp = 8, },
	{ .code	= MEDIA_BUS_FMT_SGRBG8_1X8, .bpp = 8, },
	{ .code	= MEDIA_BUS_FMT_SRGGB8_1X8, .bpp = 8, },
	{ .code	= MEDIA_BUS_FMT_Y8_1X8,     .bpp = 8, },
	{ .code	= MEDIA_BUS_FMT_SBGGR10_1X10, .bpp = 10, },
	{ .code	= MEDIA_BUS_FMT_SGBRG10_1X10, .bpp = 10, },
	{ .code	= MEDIA_BUS_FMT_SGRBG10_1X10, .bpp = 10, },
	{ .code	= MEDIA_BUS_FMT_SRGGB10_1X10, .bpp = 10, },
	{ .code	= MEDIA_BUS_FMT_SRGGI10_1X10, .bpp = 10, },
	{ .code	= MEDIA_BUS_FMT_SGRIG10_1X10, .bpp = 10, },
	{ .code	= MEDIA_BUS_FMT_SBGGI10_1X10, .bpp = 10, },
	{ .code	= MEDIA_BUS_FMT_SGBIG10_1X10, .bpp = 10, },
	{ .code	= MEDIA_BUS_FMT_SGIRG10_1X10, .bpp = 10, },
	{ .code	= MEDIA_BUS_FMT_SIGGR10_1X10, .bpp = 10, },
	{ .code	= MEDIA_BUS_FMT_SGIBG10_1X10, .bpp = 10, },
	{ .code	= MEDIA_BUS_FMT_SIGGB10_1X10, .bpp = 10, },
	{ .code	= MEDIA_BUS_FMT_SBGGR12_1X12, .bpp = 12, },
	{ .code	= MEDIA_BUS_FMT_SGBRG12_1X12, .bpp = 12, },
	{ .code	= MEDIA_BUS_FMT_SGRBG12_1X12, .bpp = 12, },
	{ .code	= MEDIA_BUS_FMT_SRGGB12_1X12, .bpp = 12, },
	{ .code	= MEDIA_BUS_FMT_RGB565_1X16,  .bpp = 16, },
	{ .code	= MEDIA_BUS_FMT_RGB888_1X24,  .bpp = 24, },
	{ .code	= MEDIA_BUS_FMT_BGR888_1X24,  .bpp = 24, },
};
    
    static void csi2rx_configure_error_irq_mask(void __iomem *base,
    					    struct csi2rx_priv *csi2rx)
    {
    	u32 error_irq_mask = 0;
    
    	error_irq_mask |= CSI2RX_ECC_ERRORS;
    	error_irq_mask |= CSI2RX_PACKET_ERRORS;
    
    	/*
    	 * Iterate through all source pads and check if they are linked
    	 * to an active remote pad. If an active remote pad is found,
    	 * calculate the corresponding bit position and set it in
    	 * mask, enabling the stream overflow error in the mask.
    	 */
    	for (int i = CSI2RX_PAD_SOURCE_STREAM0; i < CSI2RX_PAD_MAX; i++) {
    		struct media_pad *remote_pad;
    
    		remote_pad = media_pad_remote_pad_first(&csi2rx->pads[i]);
    		if (remote_pad) {
    			int pad = i - CSI2RX_PAD_SOURCE_STREAM0;
    			u32 bit_mask = CSI2RX_STREAM0_FIFO_OVERFLOW_IRQ << pad;
    
    			error_irq_mask |= bit_mask;
    		}
    	}
    
    	writel(error_irq_mask, base + CSI2RX_ERROR_IRQS_MASK_REG);
    }
    
    static irqreturn_t csi2rx_irq_handler(int irq, void *dev_id)
    {
    	struct csi2rx_priv *csi2rx = dev_id;
    	int i;
    	u32 error_status;
    
    	error_status = readl(csi2rx->base + CSI2RX_ERROR_IRQS_REG);
    
    	for (i = 0; i < CSI2RX_NUM_EVENTS; i++)
    		if (error_status & csi2rx_events[i].mask)
    			csi2rx->events[i]++;
    
    	writel(error_status, csi2rx->base + CSI2RX_ERROR_IRQS_REG);
    
    	return IRQ_HANDLED;
    }
    
    
    /**
     * cdns_csi2rx_negotiate_ppc - Negotiate pixel-per-clock on output interface
     *
     * @subdev: point to &struct v4l2_subdev
     * @pad: pad number of the source pad
     * @ppc: pointer to requested pixel-per-clock value
     *
     * Returns 0 on success, negative error code otherwise.
     */
    int cdns_csi2rx_negotiate_ppc(struct v4l2_subdev *subdev, unsigned int pad,
    			      u8 *ppc);
    
    static const struct csi2rx_fmt *csi2rx_get_fmt_by_code(u32 code)
    {
    	unsigned int i;
    
    	for (i = 0; i < ARRAY_SIZE(formats); i++)
    		if (formats[i].code == code)
    			return &formats[i];
    
    	return NULL;
    }
    
    static int csi2rx_get_frame_desc_from_source(struct csi2rx_priv *csi2rx,
    					     struct v4l2_mbus_frame_desc *fd)
    {
    	struct media_pad *remote_pad;
    
    	remote_pad = media_entity_remote_source_pad_unique(&csi2rx->subdev.entity);
    	if (!remote_pad) {
    		dev_err(csi2rx->dev, "No remote pad found for sink\n");
    		return -ENODEV;
    	}
    
    	return v4l2_subdev_call(csi2rx->source_subdev, pad, get_frame_desc,
    				remote_pad->index, fd);
    }
    
/* Convert an embedded &struct v4l2_subdev back to its csi2rx_priv. */
static inline
struct csi2rx_priv *v4l2_subdev_to_csi2rx(struct v4l2_subdev *subdev)
{
	return container_of(subdev, struct csi2rx_priv, subdev);
}
    
    static void csi2rx_reset(struct csi2rx_priv *csi2rx)
    {
    	unsigned int i;
    
    /* Reset module */
    	writel(CSI2RX_SOFT_RESET_PROTOCOL | CSI2RX_SOFT_RESET_FRONT,
    	       csi2rx->base + CSI2RX_SOFT_RESET_REG);
    	/* Reset individual streams. */
    	for (i = 0; i < csi2rx->max_streams; i++) {
    		writel(CSI2RX_STREAM_CTRL_SOFT_RST,
    		       csi2rx->base + CSI2RX_STREAM_CTRL_REG(i));
    }
    
    		usleep_range(10, 20);
    
    	/* Clear resets */
    	writel(0, csi2rx->base + CSI2RX_SOFT_RESET_REG);
    	for (i = 0; i < csi2rx->max_streams; i++)
    		writel(0, csi2rx->base + CSI2RX_STREAM_CTRL_REG(i));
    	}
    
    static int csi2rx_configure_ext_dphy(struct csi2rx_priv *csi2rx)
    {
    	struct v4l2_ctrl_handler *handler = csi2rx->source_subdev->ctrl_handler;
    	union phy_configure_opts opts = { };
    	struct phy_configure_opts_mipi_dphy *cfg = &opts.mipi_dphy;
    	struct v4l2_mbus_framefmt *framefmt;
    	struct v4l2_subdev_state *state;
    	const struct csi2rx_fmt *fmt;
    	s64 link_freq;
    	int ret;
    
    	if (v4l2_ctrl_find(handler, V4L2_CID_LINK_FREQ)) {
    		link_freq = v4l2_get_link_freq(handler, 0, 0);
    	} else {
    	state = v4l2_subdev_get_locked_active_state(&csi2rx->subdev);
    	framefmt = v4l2_subdev_state_get_stream_format(state,
    							       CSI2RX_PAD_SINK,
    							       0);
    	if (framefmt) {
    		fmt = csi2rx_get_fmt_by_code(framefmt->code);
    	} else {
    		dev_err(csi2rx->dev,
    				"Did not find active sink format\n");
    		return -EINVAL;
    	}
    
    	link_freq = v4l2_get_link_freq(handler, fmt->bpp,
    					       2 * csi2rx->num_lanes);
    
    		dev_warn(csi2rx->dev,
    			 "Guessing link frequency using bitdepth of stream 0.\n");
    		dev_warn(csi2rx->dev,
    			 "V4L2_CID_LINK_FREQ control is required for multi format sources.\n");
    	}
    
    	if (link_freq < 0) {
    		dev_err(csi2rx->dev, "Unable to calculate link frequency\n");
    		return link_freq;
    }
    
    	ret = phy_mipi_dphy_get_default_config_for_hsclk(link_freq,
    							 csi2rx->num_lanes, cfg);
    	if (ret)
    		return ret;
    
    	ret = phy_power_on(csi2rx->dphy);
    	if (ret)
    		return ret;
    
    	ret = phy_configure(csi2rx->dphy, &opts);
    	if (ret) {
    		phy_power_off(csi2rx->dphy);
    	return ret;
    }
    
    	return 0;
    }
    
    static int csi2rx_start(struct csi2rx_priv *csi2rx)
    {
    	unsigned int i;
    	unsigned long lanes_used = 0;
    	u32 reg;
    	int ret;
    
    	ret = clk_prepare_enable(csi2rx->p_clk);
    	if (ret)
    		return ret;
    
    	reset_control_deassert(csi2rx->p_rst);
    	csi2rx_reset(csi2rx);
    
    	if (csi2rx->error_irq >= 0)
    		csi2rx_configure_error_irq_mask(csi2rx->base, csi2rx);
    
    	reg = csi2rx->num_lanes << 8;
    	for (i = 0; i < csi2rx->num_lanes; i++) {
    		reg |= CSI2RX_STATIC_CFG_DLANE_MAP(i, csi2rx->lanes[i]);
    		set_bit(csi2rx->lanes[i], &lanes_used);
    	}
    
    	/*
    	 * Even the unused lanes need to be mapped. In order to avoid
    	 * to map twice to the same physical lane, keep the lanes used
    	 * in the previous loop, and only map unused physical lanes to
    	 * the rest of our logical lanes.
    	 */
    	for (i = csi2rx->num_lanes; i < csi2rx->max_lanes; i++) {
    		unsigned int idx = find_first_zero_bit(&lanes_used,
    						       csi2rx->max_lanes);
    		set_bit(idx, &lanes_used);
    		reg |= CSI2RX_STATIC_CFG_DLANE_MAP(i, i + 1);
    	}
    
    	writel(reg, csi2rx->base + CSI2RX_STATIC_CFG_REG);
    
    	/* Enable DPHY clk and data lanes. */
    	if (csi2rx->dphy) {
    		reg = CSI2RX_DPHY_CL_EN | CSI2RX_DPHY_CL_RST;
    		for (i = 0; i < csi2rx->num_lanes; i++) {
    			reg |= CSI2RX_DPHY_DL_EN(csi2rx->lanes[i] - 1);
    			reg |= CSI2RX_DPHY_DL_RST(csi2rx->lanes[i] - 1);
    		}
    
    		writel(reg, csi2rx->base + CSI2RX_DPHY_LANE_CTRL_REG);
    
    		ret = csi2rx_configure_ext_dphy(csi2rx);
    		if (ret) {
    			dev_err(csi2rx->dev,
    				"Failed to configure external DPHY: %d\n", ret);
    			goto err_disable_pclk;
    		}
    	}
    
    	/*
    	 * Create a static mapping between the CSI virtual channels
    	 * and the output stream.
    	 *
    	 * This should be enhanced, but v4l2 lacks the support for
    	 * changing that mapping dynamically.
    	 *
    	 * We also cannot enable and disable independent streams here,
    	 * hence the reference counting.
    	 */
    	for (i = 0; i < csi2rx->max_streams; i++) {
    		ret = clk_prepare_enable(csi2rx->pixel_clk[i]);
    		if (ret)
    			goto err_disable_pixclk;
    
    	reset_control_deassert(csi2rx->pixel_rst[i]);
    
    		writel(CSI2RX_STREAM_CFG_FIFO_MODE_LARGE_BUF,
    		       csi2rx->base + CSI2RX_STREAM_CFG_REG(i));
    
    		writel(csi2rx->vc_select[i],
    		       csi2rx->base + CSI2RX_STREAM_DATA_CFG_REG(i));
    
    		writel(CSI2RX_STREAM_CTRL_START,
    		       csi2rx->base + CSI2RX_STREAM_CTRL_REG(i));
    	}
    
    	ret = clk_prepare_enable(csi2rx->sys_clk);
    	if (ret)
    		goto err_disable_pixclk;
    
    reset_control_deassert(csi2rx->sys_rst);
    
    	clk_disable_unprepare(csi2rx->p_clk);
    
    	return 0;
    
    err_disable_pixclk:
    	for (; i > 0; i--) {
    		reset_control_assert(csi2rx->pixel_rst[i - 1]);
    		clk_disable_unprepare(csi2rx->pixel_clk[i - 1]);
    }
    
    	if (csi2rx->dphy) {
    		writel(0, csi2rx->base + CSI2RX_DPHY_LANE_CTRL_REG);
    		phy_power_off(csi2rx->dphy);
    	}
    err_disable_pclk:
    	clk_disable_unprepare(csi2rx->p_clk);
    
    	return ret;
    }
    
    /*
     * Stop the CSI2RX controller: assert the system reset, stop every stream,
     * then gate the per-stream clocks and power off the external D-PHY if one
     * is in use. Counterpart of csi2rx_start(); called once the last user has
     * disabled streaming. Statement order matters: every register write below
     * requires p_clk to be enabled.
     */
    static void csi2rx_stop(struct csi2rx_priv *csi2rx)
    {
    	unsigned int i;
    	u32 val;
    	int ret;
    
    	/* Register-bank clock must be on before touching any register. */
    	clk_prepare_enable(csi2rx->p_clk);
    	reset_control_assert(csi2rx->sys_rst);
    	clk_disable_unprepare(csi2rx->sys_clk);
    	/* Mask all error interrupts while the controller is down. */
    	writel(0, csi2rx->base + CSI2RX_ERROR_IRQS_MASK_REG);
    
    	for (i = 0; i < csi2rx->max_streams; i++) {
    		writel(CSI2RX_STREAM_CTRL_STOP,
    		       csi2rx->base + CSI2RX_STREAM_CTRL_REG(i));
    
    		/* Poll until the stream drops its READY bit (10 ms timeout). */
    		ret = readl_relaxed_poll_timeout(csi2rx->base +
    						 CSI2RX_STREAM_STATUS_REG(i),
    						 val,
    						 !(val & CSI2RX_STREAM_STATUS_RDY),
    						 10, 10000);
    		if (ret)
    			dev_warn(csi2rx->dev,
    				 "Failed to stop streaming on pad%u\n", i);
    
    		reset_control_assert(csi2rx->pixel_rst[i]);
    		clk_disable_unprepare(csi2rx->pixel_clk[i]);
    	}
    
    	reset_control_assert(csi2rx->p_rst);
    	clk_disable_unprepare(csi2rx->p_clk);
    
    	/*
    	 * NOTE(review): this LANE_CTRL write happens after p_clk has been
    	 * gated and p_rst asserted — confirm the register access is still
    	 * valid at this point.
    	 */
    	if (csi2rx->dphy) {
    		writel(0, csi2rx->base + CSI2RX_DPHY_LANE_CTRL_REG);
    
    		if (phy_power_off(csi2rx->dphy))
    			dev_warn(csi2rx->dev, "Couldn't power off DPHY\n");
    }
    }
    
    /*
     * Recompute the per-stream virtual-channel selection from the source's
     * frame descriptor. Any stream without a matching descriptor entry (or
     * when no descriptor is available at all) falls back to selecting VC 0.
     */
    static void csi2rx_update_vc_select(struct csi2rx_priv *csi2rx,
    				    struct v4l2_subdev_state *state)
    {
    	struct v4l2_mbus_frame_desc fd = {0};
    	struct v4l2_subdev_route *route;
    	unsigned int i;
    	int ret;
    
    	for (i = 0; i < CSI2RX_STREAMS_MAX; i++)
    		csi2rx->vc_select[i] = 0;
    
    	ret = csi2rx_get_frame_desc_from_source(csi2rx, &fd);
    	if (!ret && fd.type == V4L2_MBUS_FRAME_DESC_TYPE_CSI2) {
    		/*
    		 * The source provides per-stream VC info: map each active
    		 * route's sink stream to the VC reported for it.
    		 */
    		for_each_active_route(&state->routing, route) {
    			int cdns_stream =
    				route->source_pad - CSI2RX_PAD_SOURCE_STREAM0;
    			u8 used_vc = 0;
    			unsigned int e;
    
    			for (e = 0; e < fd.num_entries; e++) {
    				if (fd.entry[e].stream != route->sink_stream)
    					continue;
    				used_vc = fd.entry[e].bus.csi2.vc;
    				break;
    			}
    
    			csi2rx->vc_select[cdns_stream] |=
    				CSI2RX_STREAM_DATA_CFG_VC_SELECT(used_vc);
    		}
    	} else {
    		dev_dbg(csi2rx->dev,
    			"Failed to get source frame desc, allowing only VC=0\n");
    	}
    
    	/* Any stream left with no selection defaults to VC 0. */
    	for (i = 0; i < CSI2RX_STREAMS_MAX; i++) {
    		if (!csi2rx->vc_select[i])
    			csi2rx->vc_select[i] =
    				CSI2RX_STREAM_DATA_CFG_VC_SELECT(0);
    	}
    }
    
    /*
     * Enable the requested streams: bring up the whole controller for the
     * first user, then ask the connected source subdev to start streaming.
     *
     * Fixes: the debug printk() had no KERN_ level and fired on every enable;
     * it is now a dev_dbg() carrying the stream mask. Also fixed the
     * mis-indented error-path return.
     */
    static int csi2rx_enable_streams(struct v4l2_subdev *subdev,
    				 struct v4l2_subdev_state *state, u32 pad,
    				 u64 streams_mask)
    {
    	struct csi2rx_priv *csi2rx = v4l2_subdev_to_csi2rx(subdev);
    	struct media_pad *remote_pad;
    	u64 sink_streams;
    	int ret;
    
    	remote_pad = media_pad_remote_pad_first(&csi2rx->pads[CSI2RX_PAD_SINK]);
    	if (!remote_pad) {
    		dev_err(csi2rx->dev,
    			"Failed to find connected source\n");
    		return -ENODEV;
    	}
    
    	/* Translate the source-pad stream mask into sink-side streams. */
    	sink_streams = v4l2_subdev_state_xlate_streams(state,
    						       CSI2RX_PAD_SOURCE_STREAM0,
    						       CSI2RX_PAD_SINK,
    						       &streams_mask);
    
    	mutex_lock(&csi2rx->lock);
    	/*
    	 * If we're not the first users, there's no need to
    	 * enable the whole controller.
    	 */
    	if (!csi2rx->count) {
    		ret = csi2rx_start(csi2rx);
    		if (ret)
    			goto err_stream_start;
    	}
    
    	/*
    	 * Start streaming on the source.
    	 *
    	 * NOTE(review): only these specific sink-stream masks are forwarded
    	 * to the source subdev — this looks like a board-specific workaround
    	 * to batch per-channel enables into a single source start. Confirm
    	 * the masks match the active routing; any other mask leaves the
    	 * source un-started.
    	 */
    	if (sink_streams == BIT(2) || sink_streams == 0xf ||
    	    sink_streams == 3) {
    		dev_dbg(csi2rx->dev, "%s: enabling sink streams %#llx\n",
    			__func__, sink_streams);
    		ret = v4l2_subdev_enable_streams(csi2rx->source_subdev,
    						 remote_pad->index,
    						 sink_streams);
    		if (ret) {
    			dev_err(csi2rx->dev,
    				"Failed to start streams %#llx on subdev\n",
    				sink_streams);
    			goto err_subdev_enable;
    		}
    	}
    
    	csi2rx->count++;
    	mutex_unlock(&csi2rx->lock);
    
    	return 0;
    
    err_subdev_enable:
    	/* count is still 0 here iff we were the first user: undo the start. */
    	if (!csi2rx->count)
    		csi2rx_stop(csi2rx);
    err_stream_start:
    	mutex_unlock(&csi2rx->lock);
    	return ret;
    }
    
    /*
     * Disable the requested streams: stop the source subdev (for the batched
     * stream masks this board uses) and shut the controller down when the
     * last user goes away.
     */
    static int csi2rx_disable_streams(struct v4l2_subdev *subdev,
    				  struct v4l2_subdev_state *state, u32 pad,
    				  u64 streams_mask)
    {
    	struct csi2rx_priv *csi2rx = v4l2_subdev_to_csi2rx(subdev);
    	struct media_pad *remote_pad;
    	u64 sink_streams;
    	int ret;
    
    	sink_streams = v4l2_subdev_state_xlate_streams(state,
    						       CSI2RX_PAD_SOURCE_STREAM0,
    						       CSI2RX_PAD_SINK,
    						       &streams_mask);
    
    	remote_pad = media_pad_remote_pad_first(&csi2rx->pads[CSI2RX_PAD_SINK]);
    	if (!remote_pad) {
    		dev_err(csi2rx->dev,
    			"Failed to find connected source\n");
    		return -ENODEV;
    	}
    
    	/* stop streaming on the source */
    	if (sink_streams == BIT(2) || sink_streams == 0xf ||
    	    sink_streams == 3) {
    		ret = v4l2_subdev_disable_streams(csi2rx->source_subdev,
    						  remote_pad->index,
    						  sink_streams);
    		if (ret)
    			dev_err(csi2rx->dev,
    				"Failed %d to stop streams %#llx on subdev\n",
    				ret, sink_streams);
    	}
    
    	mutex_lock(&csi2rx->lock);
    	/* Let the last user turn off the lights. */
    	if (!--csi2rx->count)
    		csi2rx_stop(csi2rx);
    	mutex_unlock(&csi2rx->lock);
    
    	return 0;
    }
    
    /* Dump the accumulated error-event counters to the kernel log. */
    static int csi2rx_log_status(struct v4l2_subdev *sd)
    {
    	struct csi2rx_priv *csi2rx = v4l2_subdev_to_csi2rx(sd);
    	unsigned int idx;
    
    	for (idx = 0; idx < CSI2RX_NUM_EVENTS; idx++) {
    		if (!csi2rx->events[idx])
    			continue;
    		dev_info(csi2rx->dev, "%s events: %d\n",
    			 csi2rx_events[idx].name,
    			 csi2rx->events[idx]);
    	}
    
    	return 0;
    }
    
    /*
     * Legacy .s_stream entry point: translate a plain on/off request into the
     * per-pad streams API by collecting the active source streams on every
     * source pad and enabling/disabling them all.
     *
     * Fix: @ret was previously uninitialized, so if the pad loop never
     * executed the function returned an indeterminate value (UB). It now
     * defaults to 0.
     */
    static int csi2rx_s_stream_fallback(struct v4l2_subdev *sd, int enable)
    {
    	struct v4l2_subdev_state *state;
    	struct v4l2_subdev_route *route;
    	u64 mask[CSI2RX_PAD_MAX] = {0};
    	int i, ret = 0;
    
    	/* Find the stream mask on all source pads */
    	state = v4l2_subdev_lock_and_get_active_state(sd);
    	for (i = CSI2RX_PAD_SOURCE_STREAM0; i < CSI2RX_PAD_MAX; i++) {
    		for_each_active_route(&state->routing, route) {
    			if (route->source_pad == i)
    				mask[i] |= BIT_ULL(route->source_stream);
    		}
    	}
    	v4l2_subdev_unlock_state(state);
    
    	/* Start (or stop) streaming on each pad */
    	for (i = CSI2RX_PAD_SOURCE_STREAM0; i < CSI2RX_PAD_MAX; i++) {
    		if (enable)
    			ret = v4l2_subdev_enable_streams(sd, i, mask[i]);
    		else
    			ret = v4l2_subdev_disable_streams(sd, i, mask[i]);
    		if (ret)
    			return ret;
    	}
    
    	return ret;
    }
    
    /* Enumerate the media bus codes this bridge accepts on its sink pad. */
    static int csi2rx_enum_mbus_code(struct v4l2_subdev *subdev,
    				 struct v4l2_subdev_state *state,
    				 struct v4l2_subdev_mbus_code_enum *code_enum)
    {
    	const unsigned int idx = code_enum->index;
    
    	if (idx >= ARRAY_SIZE(formats))
    		return -EINVAL;
    
    	code_enum->code = formats[idx].code;
    	return 0;
    }
    
    /*
     * Validate a routing table (1:1 routes only, bounded by the frame
     * descriptor capacity) and apply it, initializing every stream to a
     * default 640x480 UYVY format.
     */
    static int _csi2rx_set_routing(struct v4l2_subdev *subdev,
    			       struct v4l2_subdev_state *state,
    			       struct v4l2_subdev_krouting *routing)
    {
    	static const struct v4l2_mbus_framefmt format = {
    		.width = 640,
    		.height = 480,
    		.code = MEDIA_BUS_FMT_UYVY8_1X16,
    		.field = V4L2_FIELD_NONE,
    		.colorspace = V4L2_COLORSPACE_SRGB,
    		.ycbcr_enc = V4L2_YCBCR_ENC_601,
    		.quantization = V4L2_QUANTIZATION_LIM_RANGE,
    		.xfer_func = V4L2_XFER_FUNC_SRGB,
    	};
    	int ret;
    
    	if (routing->num_routes > V4L2_FRAME_DESC_ENTRY_MAX)
    		return -EINVAL;
    
    	ret = v4l2_subdev_routing_validate(subdev, routing,
    					   V4L2_SUBDEV_ROUTING_ONLY_1_TO_1);
    	if (ret)
    		return ret;
    
    	return v4l2_subdev_set_routing_with_fmt(subdev, state, routing,
    						&format);
    }
    
    /*
     * Userspace-facing routing setter. Refuses to change the active routing
     * while any stream is running, then refreshes the per-stream VC
     * selection from the new routing.
     */
    static int csi2rx_set_routing(struct v4l2_subdev *subdev,
    			      struct v4l2_subdev_state *state,
    			      enum v4l2_subdev_format_whence which,
    			      struct v4l2_subdev_krouting *routing)
    {
    	struct csi2rx_priv *csi2rx = v4l2_subdev_to_csi2rx(subdev);
    	int ret;
    
    	if (which == V4L2_SUBDEV_FORMAT_ACTIVE && csi2rx->count)
    		return -EBUSY;
    
    	ret = _csi2rx_set_routing(subdev, state, routing);
    	if (!ret)
    		csi2rx_update_vc_select(csi2rx, state);
    
    	return ret;
    }
    
    /*
     * Set the format on the sink pad and mirror it onto the opposite stream
     * on the source side. The bridge performs no transcoding, so source-pad
     * formats are read-only reflections of the sink format.
     */
    static int csi2rx_set_fmt(struct v4l2_subdev *subdev,
    			  struct v4l2_subdev_state *state,
    			  struct v4l2_subdev_format *format)
    {
    	struct csi2rx_priv *csi2rx = v4l2_subdev_to_csi2rx(subdev);
    	struct v4l2_mbus_framefmt *fmt;
    
    	if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE && csi2rx->count)
    		return -EBUSY;
    
    	/* No transcoding: source pads simply report the sink format. */
    	if (format->pad >= CSI2RX_PAD_SOURCE_STREAM0)
    		return v4l2_subdev_get_fmt(subdev, state, format);
    
    	/* Unsupported media bus codes fall back to the first format. */
    	if (!csi2rx_get_fmt_by_code(format->format.code))
    		format->format.code = formats[0].code;
    
    	/* Store on the sink stream ... */
    	fmt = v4l2_subdev_state_get_stream_format(state, format->pad,
    						  format->stream);
    	if (!fmt)
    		return -EINVAL;
    	*fmt = format->format;
    
    	/* ... and propagate to the matching source stream. */
    	fmt = v4l2_subdev_state_get_opposite_stream_format(state, format->pad,
    							   format->stream);
    	if (!fmt)
    		return -EINVAL;
    	*fmt = format->format;
    
    	return 0;
    }
    
    static int csi2rx_init_cfg(struct v4l2_subdev *subdev,
    			   struct v4l2_subdev_state *state)
    {
    	struct v4l2_subdev_route routes[] = {
    		{
    			.sink_pad = CSI2RX_PAD_SINK,
    			.sink_stream = 0,
    			.source_pad = CSI2RX_PAD_SOURCE_STREAM0,
    			.source_stream = 0,
    			.flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE,
    		},
    	};
    
    	struct v4l2_subdev_krouting routing = {
    		.num_routes = ARRAY_SIZE(routes),
    		.routes = routes,
    	};
    
    	return _csi2rx_set_routing(subdev, state, &routing);
    }
    
    /* Forward .get_frame_desc straight to the connected source subdev. */
    static int csi2rx_get_frame_desc(struct v4l2_subdev *subdev, unsigned int pad,
    				 struct v4l2_mbus_frame_desc *fd)
    {
    	return csi2rx_get_frame_desc_from_source(v4l2_subdev_to_csi2rx(subdev),
    						 fd);
    }
    /*
     * cdns_csi2rx_negotiate_ppc() - Negotiate pixels-per-clock for a source pad.
     * @subdev: the bridge subdev
     * @pad: source pad to negotiate for (must be a CSI2RX_PAD_SOURCE_* pad)
     * @ppc: in/out requested pixels per clock; reduced to what every active
     *       stream on @pad can sustain (per-format max_pixels)
     *
     * Return: 0 on success, -EINVAL on bad arguments or an unknown media bus
     * code, -EPIPE when a stream on @pad has no format configured. On the
     * error paths *ppc is forced to 1.
     *
     * NOTE(review): csi2rx->num_pixels[] is written even when ret != 0 —
     * confirm callers rely on that, otherwise the update should probably be
     * skipped on failure.
     */
    int cdns_csi2rx_negotiate_ppc(struct v4l2_subdev *subdev, unsigned int pad,
    			      u8 *ppc)
    {
    	struct csi2rx_priv *csi2rx = v4l2_subdev_to_csi2rx(subdev);
    	const struct csi2rx_fmt *csi_fmt;
    	struct v4l2_subdev_route *route;
    	struct v4l2_subdev_state *state;
    	struct v4l2_mbus_framefmt *fmt;
    	int ret = 0;
    
    	if (!ppc || pad < CSI2RX_PAD_SOURCE_STREAM0 || pad >= CSI2RX_PAD_MAX)
    		return -EINVAL;
    
    	state = v4l2_subdev_lock_and_get_active_state(subdev);
    	/* Check all streams on requested pad */
    	for_each_active_route(&state->routing, route) {
    		if (route->source_pad != pad)
    			continue;
    
    		fmt = v4l2_subdev_state_get_stream_format(state, route->source_pad,
    						   route->source_stream);
    		if (!fmt) {
    			ret = -EPIPE;
    			*ppc = 1;
    			break;
    		}
    
    		csi_fmt = csi2rx_get_fmt_by_code(fmt->code);
    		if (!csi_fmt) {
    			ret = -EINVAL;
    			*ppc = 1;
    			break;
    		}
    
    		/* Reduce requested PPC if it is too high for this stream */
    		*ppc = min(*ppc, csi_fmt->max_pixels);
    	}
    	v4l2_subdev_unlock_state(state);
    
    	csi2rx->num_pixels[pad - CSI2RX_PAD_SOURCE_STREAM0] =
    		CSI2RX_STREAM_CFG_NUM_PIXELS(*ppc);
    
    	return ret;
    }
    EXPORT_SYMBOL(cdns_csi2rx_negotiate_ppc);
    
    /* Pad-level operations: formats, routing and per-stream control. */
    static const struct v4l2_subdev_pad_ops csi2rx_pad_ops = {
    	.enum_mbus_code		= csi2rx_enum_mbus_code,
    	.get_fmt		= v4l2_subdev_get_fmt,
    	.set_fmt		= csi2rx_set_fmt,
    	.init_cfg		= csi2rx_init_cfg,
    	.get_frame_desc		= csi2rx_get_frame_desc,
    	.set_routing		= csi2rx_set_routing,
    	.enable_streams		= csi2rx_enable_streams,
    	.disable_streams	= csi2rx_disable_streams,
    };
    
    /* Core operations: event-counter logging only. */
    static const struct v4l2_subdev_core_ops csi2rx_core_ops = {
    	.log_status	= csi2rx_log_status,
    };
    
    /* Legacy video ops, mapped onto the streams API by the fallback. */
    static const struct v4l2_subdev_video_ops csi2rx_video_ops = {
    	.s_stream	= csi2rx_s_stream_fallback,
    };
    
    static const struct v4l2_subdev_ops csi2rx_subdev_ops = {
    	.core		= &csi2rx_core_ops,
    	.video		= &csi2rx_video_ops,
    	.pad		= &csi2rx_pad_ops,
    };
    
    static const struct media_entity_operations csi2rx_media_ops = {
    	.link_validate = v4l2_subdev_link_validate,
    };
    
    /*
     * Async notifier .bound callback: record the source subdev and its output
     * pad, then create an enabled, immutable media link from that pad to our
     * sink pad.
     */
    static int csi2rx_async_bound(struct v4l2_async_notifier *notifier,
    			      struct v4l2_subdev *s_subdev,
    			      struct v4l2_async_connection *asd)
    {
    	struct v4l2_subdev *subdev = notifier->sd;
    	struct csi2rx_priv *csi2rx = v4l2_subdev_to_csi2rx(subdev);
    	int src_pad;
    
    	src_pad = media_entity_get_fwnode_pad(&s_subdev->entity,
    					      asd->match.fwnode,
    					      MEDIA_PAD_FL_SOURCE);
    	if (src_pad < 0) {
    		dev_err(csi2rx->dev, "Couldn't find output pad for subdev %s\n",
    			s_subdev->name);
    		return src_pad;
    	}
    
    	csi2rx->source_pad = src_pad;
    	csi2rx->source_subdev = s_subdev;
    
    	dev_dbg(csi2rx->dev, "Bound %s pad: %d\n", s_subdev->name,
    		csi2rx->source_pad);
    
    	return media_create_pad_link(&csi2rx->source_subdev->entity,
    				     csi2rx->source_pad,
    				     &csi2rx->subdev.entity, 0,
    				     MEDIA_LNK_FL_ENABLED |
    				     MEDIA_LNK_FL_IMMUTABLE);
    }
    
    /* Async notifier callbacks: bind the remote source when it appears. */
    static const struct v4l2_async_notifier_operations csi2rx_notifier_ops = {
    	.bound		= csi2rx_async_bound,
    };
    
    /*
     * Acquire all static resources for the controller: register space, the
     * system and register-bank clocks, the optional resets and external
     * D-PHY, then read the device configuration register to discover how
     * many lanes and stream interfaces this instance implements, and grab
     * one pixel clock + reset per stream.
     *
     * Return: 0 on success, a negative errno otherwise. All resources are
     * devm-managed, so there is nothing to unwind on failure.
     */
    static int csi2rx_get_resources(struct csi2rx_priv *csi2rx,
    				struct platform_device *pdev)
    {
    	unsigned char i;
    	u32 dev_cfg;
    	int ret;
    
    	csi2rx->base = devm_platform_ioremap_resource(pdev, 0);
    	if (IS_ERR(csi2rx->base))
    		return PTR_ERR(csi2rx->base);
    
    	csi2rx->sys_clk = devm_clk_get(&pdev->dev, "sys_clk");
    	if (IS_ERR(csi2rx->sys_clk)) {
    		dev_err(&pdev->dev, "Couldn't get sys clock\n");
    		return PTR_ERR(csi2rx->sys_clk);
    	}
    
    	csi2rx->p_clk = devm_clk_get(&pdev->dev, "p_clk");
    	if (IS_ERR(csi2rx->p_clk)) {
    		dev_err(&pdev->dev, "Couldn't get P clock\n");
    		return PTR_ERR(csi2rx->p_clk);
    	}
    
    	/* Both resets are optional: NULL is returned when absent. */
    	csi2rx->sys_rst = devm_reset_control_get_optional_exclusive(&pdev->dev,
    								    "sys");
    	if (IS_ERR(csi2rx->sys_rst))
    		return PTR_ERR(csi2rx->sys_rst);
    
    	csi2rx->p_rst = devm_reset_control_get_optional_exclusive(&pdev->dev,
    								  "reg_bank");
    	if (IS_ERR(csi2rx->p_rst))
    		return PTR_ERR(csi2rx->p_rst);
    
    	csi2rx->dphy = devm_phy_optional_get(&pdev->dev, "dphy");
    	if (IS_ERR(csi2rx->dphy)) {
    		dev_err(&pdev->dev, "Couldn't get external D-PHY\n");
    		return PTR_ERR(csi2rx->dphy);
    	}
    
    	/* The register-bank clock must run to read the config register. */
    	ret = clk_prepare_enable(csi2rx->p_clk);
    	if (ret) {
    		dev_err(&pdev->dev, "Couldn't prepare and enable P clock\n");
    		return ret;
    	}
    
    	dev_cfg = readl(csi2rx->base + CSI2RX_DEVICE_CFG_REG);
    	clk_disable_unprepare(csi2rx->p_clk);
    
    	/* dev_cfg[2:0]: number of lanes wired into this instance. */
    	csi2rx->max_lanes = dev_cfg & 7;
    	if (csi2rx->max_lanes > CSI2RX_LANES_MAX) {
    		dev_err(&pdev->dev, "Invalid number of lanes: %u\n",
    			csi2rx->max_lanes);
    		return -EINVAL;
    	}
    
    	/* dev_cfg[6:4]: number of stream interfaces. */
    	csi2rx->max_streams = (dev_cfg >> 4) & 7;
    	if (csi2rx->max_streams > CSI2RX_STREAMS_MAX) {
    		dev_err(&pdev->dev, "Invalid number of streams: %u\n",
    			csi2rx->max_streams);
    		return -EINVAL;
    	}
    
    	/* dev_cfg[3]: set when an internal D-PHY is present. */
    	csi2rx->has_internal_dphy = dev_cfg & BIT(3) ? true : false;
    
    	/*
    	 * FIXME: Once we'll have internal D-PHY support, the check
    	 * will need to be removed.
    	 */
    	if (!csi2rx->dphy && csi2rx->has_internal_dphy) {
    		dev_err(&pdev->dev, "Internal D-PHY not supported yet\n");
    		return -EINVAL;
    	}
    
    	/* One pixel clock and one (optional) reset per stream interface. */
    	for (i = 0; i < csi2rx->max_streams; i++) {
    		char name[16];
    
    		snprintf(name, sizeof(name), "pixel_if%u_clk", i);
    		csi2rx->pixel_clk[i] = devm_clk_get(&pdev->dev, name);
    		if (IS_ERR(csi2rx->pixel_clk[i])) {
    			dev_err(&pdev->dev, "Couldn't get clock %s\n", name);
    			return PTR_ERR(csi2rx->pixel_clk[i]);
    		}
    
    		snprintf(name, sizeof(name), "pixel_if%u", i);
    		csi2rx->pixel_rst[i] =
    			devm_reset_control_get_optional_exclusive(&pdev->dev,
    								  name);
    		if (IS_ERR(csi2rx->pixel_rst[i]))
    			return PTR_ERR(csi2rx->pixel_rst[i]);
    	}
    
    	return 0;
    }
    
    static int csi2rx_parse_dt(struct csi2rx_priv *csi2rx)
    {
    	struct v4l2_fwnode_endpoint v4l2_ep = { .bus_type = 0 };
    	struct v4l2_async_connection *asd;
    	struct fwnode_handle *fwh;
    	struct device_node *ep;
    	int ret;
    
    	ep = of_graph_get_endpoint_by_regs(csi2rx->dev->of_node, 0, 0);
    	if (!ep)
    		return -EINVAL;
    
    	fwh = of_fwnode_handle(ep);
    	ret = v4l2_fwnode_endpoint_parse(fwh, &v4l2_ep);
    	if (ret) {
    		dev_err(csi2rx->dev, "Could not parse v4l2 endpoint\n");
    		of_node_put(ep);
    		return ret;
    	}
    
    	if (v4l2_ep.bus_type != V4L2_MBUS_CSI2_DPHY) {
    		dev_err(csi2rx->dev, "Unsupported media bus type: 0x%x\n",
    			v4l2_ep.bus_type);
    		of_node_put(ep);
    		return -EINVAL;
    	}
    
    	memcpy(csi2rx->lanes, v4l2_ep.bus.mipi_csi2.data_lanes,
    	       sizeof(csi2rx->lanes));
    	csi2rx->num_lanes = v4l2_ep.bus.mipi_csi2.num_data_lanes;
    	if (csi2rx->num_lanes > csi2rx->max_lanes) {
    		dev_err(csi2rx->dev, "Unsupported number of data-lanes: %d\n",
    			csi2rx->num_lanes);
    		of_node_put(ep);
    		return -EINVAL;
    	}
    
    	v4l2_async_subdev_nf_init(&csi2rx->notifier, &csi2rx->subdev);
    
    	asd = v4l2_async_nf_add_fwnode_remote(&csi2rx->notifier, fwh,
    					      struct v4l2_async_connection);
    	of_node_put(ep);
    	if (IS_ERR(asd)) {
    		v4l2_async_nf_cleanup(&csi2rx->notifier);
    		return PTR_ERR(asd);
    	}
    
    	csi2rx->notifier.ops = &csi2rx_notifier_ops;
    
    	ret = v4l2_async_nf_register(&csi2rx->notifier);
    	if (ret)
    		v4l2_async_nf_cleanup(&csi2rx->notifier);
    
    	return ret;
    }
    
    /*
     * Probe: allocate the driver state, gather resources, parse the DT graph,
     * initialize the subdev and its media pads, hook up the optional error
     * IRQ, and register the subdev with the async framework.
     *
     * Error unwinding runs in reverse order of setup; note that
     * csi2rx_parse_dt() registers the notifier, so every later failure must
     * unregister it (err_cleanup does).
     */
    static int csi2rx_probe(struct platform_device *pdev)
    {
    	struct csi2rx_priv *csi2rx;
    	unsigned int i;
    	int ret;
    
    	csi2rx = kzalloc(sizeof(*csi2rx), GFP_KERNEL);
    	if (!csi2rx)
    		return -ENOMEM;
    	platform_set_drvdata(pdev, csi2rx);
    	csi2rx->dev = &pdev->dev;
    	mutex_init(&csi2rx->lock);
    
    	ret = csi2rx_get_resources(csi2rx, pdev);
    	if (ret)
    		goto err_free_priv;
    
    	ret = csi2rx_parse_dt(csi2rx);
    	if (ret)
    		goto err_free_priv;
    
    	csi2rx->subdev.owner = THIS_MODULE;
    	csi2rx->subdev.dev = &pdev->dev;
    	v4l2_subdev_init(&csi2rx->subdev, &csi2rx_subdev_ops);
    	v4l2_set_subdevdata(&csi2rx->subdev, &pdev->dev);
    	snprintf(csi2rx->subdev.name, V4L2_SUBDEV_NAME_SIZE, "%s.%s",
    		 KBUILD_MODNAME, dev_name(&pdev->dev));
    
    	/* Create our media pads */
    	csi2rx->subdev.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
    	csi2rx->pads[CSI2RX_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
    	for (i = CSI2RX_PAD_SOURCE_STREAM0; i < CSI2RX_PAD_MAX; i++)
    		csi2rx->pads[i].flags = MEDIA_PAD_FL_SOURCE;
    	csi2rx->subdev.flags = V4L2_SUBDEV_FL_HAS_DEVNODE |
    		V4L2_SUBDEV_FL_STREAMS;
    	csi2rx->subdev.entity.ops = &csi2rx_media_ops;
    
    	ret = media_entity_pads_init(&csi2rx->subdev.entity, CSI2RX_PAD_MAX,
    				     csi2rx->pads);
    	if (ret)
    		goto err_cleanup;
    
    	/* The error IRQ is optional; run without counters if absent. */
    	csi2rx->error_irq = platform_get_irq_byname_optional(pdev, "error_irq");
    
    	if (csi2rx->error_irq < 0) {
    		dev_dbg(csi2rx->dev, "Optional interrupt not defined, proceeding without it\n");
    	} else {
    		ret = devm_request_irq(csi2rx->dev, csi2rx->error_irq,
    				       csi2rx_irq_handler, 0,
    				       dev_name(&pdev->dev), csi2rx);
    		if (ret) {
    			dev_err(csi2rx->dev,
    				"Unable to request interrupt: %d\n", ret);
    			goto err_cleanup;
    		}
    	}
    
    	ret = v4l2_subdev_init_finalize(&csi2rx->subdev);
    	if (ret)
    		goto err_cleanup;
    
    	ret = v4l2_async_register_subdev(&csi2rx->subdev);
    	if (ret < 0)
    		goto err_free_state;
    
    	dev_info(&pdev->dev,
    		 "Probed CSI2RX with %u/%u lanes, %u streams, %s D-PHY\n",
    		 csi2rx->num_lanes, csi2rx->max_lanes, csi2rx->max_streams,
    		 csi2rx->dphy ? "external" :
    		 csi2rx->has_internal_dphy ? "internal" : "no");
    
    	return 0;
    
    err_free_state:
    	v4l2_subdev_cleanup(&csi2rx->subdev);
    	err_cleanup:
    	v4l2_async_nf_unregister(&csi2rx->notifier);
    	v4l2_async_nf_cleanup(&csi2rx->notifier);
    	media_entity_cleanup(&csi2rx->subdev.entity);
    err_free_priv:
    	kfree(csi2rx);
    	return ret;
    }
    
    /* Tear everything down in reverse order of probe and free the state. */
    static void csi2rx_remove(struct platform_device *pdev)
    {
    	struct csi2rx_priv *csi2rx = platform_get_drvdata(pdev);
    
    	v4l2_async_nf_unregister(&csi2rx->notifier);
    	v4l2_async_nf_cleanup(&csi2rx->notifier);
    	v4l2_async_unregister_subdev(&csi2rx->subdev);
    	v4l2_subdev_cleanup(&csi2rx->subdev);
    	media_entity_cleanup(&csi2rx->subdev.entity);
    	kfree(csi2rx);
    }
    
    /* Devices handled by this driver. */
    static const struct of_device_id csi2rx_of_table[] = {
    	{ .compatible = "starfive,jh7110-csi2rx" },
    	{ .compatible = "cdns,csi2rx" },
    	{ },
    };
    MODULE_DEVICE_TABLE(of, csi2rx_of_table);
    
    static struct platform_driver csi2rx_driver = {
    	.probe		= csi2rx_probe,
    	.remove_new	= csi2rx_remove,
    	.driver	= {
    		.name		= "cdns-csi2rx",
    		.of_match_table	= csi2rx_of_table,
    	},
    };
    module_platform_driver(csi2rx_driver);
    
    MODULE_AUTHOR("Maxime Ripard <maxime.ripard@bootlin.com>");
    MODULE_DESCRIPTION("Cadence CSI2-RX controller");
    MODULE_LICENSE("GPL");
    

  • Hello Xiangxu,

    Were you trying to ask something?

    Thanks,

    Jianzhong

  • Hi Jianzhong,

    This issue can be closed; I posted the changes here just to track the upgrade.

    thanks.

    Regards

    Joe