cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ispstat.c (30268B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * ispstat.c
      4 *
      5 * TI OMAP3 ISP - Statistics core
      6 *
      7 * Copyright (C) 2010 Nokia Corporation
      8 * Copyright (C) 2009 Texas Instruments, Inc
      9 *
     10 * Contacts: David Cohen <dacohen@gmail.com>
     11 *	     Laurent Pinchart <laurent.pinchart@ideasonboard.com>
     12 *	     Sakari Ailus <sakari.ailus@iki.fi>
     13 */
     14
     15#include <linux/dma-mapping.h>
     16#include <linux/slab.h>
     17#include <linux/timekeeping.h>
     18#include <linux/uaccess.h>
     19
     20#include "isp.h"
     21
     22#define ISP_STAT_USES_DMAENGINE(stat)	((stat)->dma_ch != NULL)
     23
     24/*
     25 * MAGIC_SIZE must always be the greatest common divisor of
     26 * AEWB_PACKET_SIZE and AF_PAXEL_SIZE.
     27 */
     28#define MAGIC_SIZE		16
     29#define MAGIC_NUM		0x55
     30
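/*
 * Illustrative sketch of how the magic words guard a statistics buffer
 * (buf_size is the statistics payload size configured in stat->buf_size):
 *
 *   before a capture: [ 0x55 x 16 | ....................... | 0x55 x 16 ]
 *                       offset 0                              offset buf_size
 *   after a capture:  [ statistics data, buf_size bytes     | 0x55 x 16 ]
 *
 * isp_stat_buf_insert_magic() writes both regions before the engine runs;
 * isp_stat_buf_check_magic() then requires the leading magic bytes to have
 * been overwritten by real data and the trailing magic bytes to still be
 * intact, otherwise the buffer is treated as corrupted.
 */
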
      31/* HACK: The AF module seems to write one paxel of data more than it should. */
     32#define AF_EXTRA_DATA		OMAP3ISP_AF_PAXEL_SIZE
     33
      34/*
      35 * HACK: H3A modules enter an invalid state after an SBL overflow: the next
      36 * buffer starts to be written at the point where the overflow occurred
      37 * instead of at the configured address. The only known way to bring the
      38 * module back to a valid state is to process a valid buffer, which requires
      39 * at least a doubled buffer size to avoid accessing an invalid memory
      40 * region. That alone does not fix everything, as more than one consecutive
      41 * SBL overflow may happen, in which case it is unpredictable how many
      42 * buffers the allocated memory should fit. For that case, a recover
      43 * configuration was created. It produces the minimum buffer size for each
      44 * H3A module and decreases the chance of further SBL overflows. This
      45 * recover state is enabled every time an SBL overflow occurs. As the
      46 * recover output buffer size isn't big, it's possible to reserve extra
      47 * space able to fit many recover buffers, making an access to an invalid
      48 * memory region extremely unlikely.
      49 */
     50#define NUM_H3A_RECOVER_BUFS	10
     51
      52/*
      53 * HACK: Because of HW issues the generic layer sometimes needs to behave
      54 * differently for different statistics modules.
      55 */
     56#define IS_H3A_AF(stat)		((stat) == &(stat)->isp->isp_af)
     57#define IS_H3A_AEWB(stat)	((stat) == &(stat)->isp->isp_aewb)
     58#define IS_H3A(stat)		(IS_H3A_AF(stat) || IS_H3A_AEWB(stat))
     59
     60static void __isp_stat_buf_sync_magic(struct ispstat *stat,
     61				      struct ispstat_buffer *buf,
     62				      u32 buf_size, enum dma_data_direction dir,
     63				      void (*dma_sync)(struct device *,
     64					dma_addr_t, unsigned long, size_t,
     65					enum dma_data_direction))
     66{
     67	/* Sync the initial and final magic words. */
     68	dma_sync(stat->isp->dev, buf->dma_addr, 0, MAGIC_SIZE, dir);
     69	dma_sync(stat->isp->dev, buf->dma_addr + (buf_size & PAGE_MASK),
     70		 buf_size & ~PAGE_MASK, MAGIC_SIZE, dir);
     71}
     72
     73static void isp_stat_buf_sync_magic_for_device(struct ispstat *stat,
     74					       struct ispstat_buffer *buf,
     75					       u32 buf_size,
     76					       enum dma_data_direction dir)
     77{
     78	if (ISP_STAT_USES_DMAENGINE(stat))
     79		return;
     80
     81	__isp_stat_buf_sync_magic(stat, buf, buf_size, dir,
     82				  dma_sync_single_range_for_device);
     83}
     84
     85static void isp_stat_buf_sync_magic_for_cpu(struct ispstat *stat,
     86					    struct ispstat_buffer *buf,
     87					    u32 buf_size,
     88					    enum dma_data_direction dir)
     89{
     90	if (ISP_STAT_USES_DMAENGINE(stat))
     91		return;
     92
     93	__isp_stat_buf_sync_magic(stat, buf, buf_size, dir,
     94				  dma_sync_single_range_for_cpu);
     95}
     96
     97static int isp_stat_buf_check_magic(struct ispstat *stat,
     98				    struct ispstat_buffer *buf)
     99{
    100	const u32 buf_size = IS_H3A_AF(stat) ?
    101			     buf->buf_size + AF_EXTRA_DATA : buf->buf_size;
    102	u8 *w;
    103	u8 *end;
    104	int ret = -EINVAL;
    105
    106	isp_stat_buf_sync_magic_for_cpu(stat, buf, buf_size, DMA_FROM_DEVICE);
    107
    108	/* Checking initial magic numbers. They shouldn't be here anymore. */
    109	for (w = buf->virt_addr, end = w + MAGIC_SIZE; w < end; w++)
    110		if (likely(*w != MAGIC_NUM))
    111			ret = 0;
    112
    113	if (ret) {
    114		dev_dbg(stat->isp->dev,
    115			"%s: beginning magic check does not match.\n",
    116			stat->subdev.name);
    117		return ret;
    118	}
    119
     120	/* Check the magic numbers at the end. They must still be here. */
    121	for (w = buf->virt_addr + buf_size, end = w + MAGIC_SIZE;
    122	     w < end; w++) {
    123		if (unlikely(*w != MAGIC_NUM)) {
    124			dev_dbg(stat->isp->dev,
    125				"%s: ending magic check does not match.\n",
    126				stat->subdev.name);
    127			return -EINVAL;
    128		}
    129	}
    130
    131	isp_stat_buf_sync_magic_for_device(stat, buf, buf_size,
    132					   DMA_FROM_DEVICE);
    133
    134	return 0;
    135}
    136
    137static void isp_stat_buf_insert_magic(struct ispstat *stat,
    138				      struct ispstat_buffer *buf)
    139{
    140	const u32 buf_size = IS_H3A_AF(stat) ?
    141			     stat->buf_size + AF_EXTRA_DATA : stat->buf_size;
    142
    143	isp_stat_buf_sync_magic_for_cpu(stat, buf, buf_size, DMA_FROM_DEVICE);
    144
     145	/*
     146	 * Insert MAGIC_NUM at the beginning and end of the buffer.
     147	 * buf->buf_size is set only after the buffer is queued. For now the
     148	 * right buf_size for the current configuration is given by
     149	 * stat->buf_size.
     150	 */
    151	memset(buf->virt_addr, MAGIC_NUM, MAGIC_SIZE);
    152	memset(buf->virt_addr + buf_size, MAGIC_NUM, MAGIC_SIZE);
    153
    154	isp_stat_buf_sync_magic_for_device(stat, buf, buf_size,
    155					   DMA_BIDIRECTIONAL);
    156}
    157
    158static void isp_stat_buf_sync_for_device(struct ispstat *stat,
    159					 struct ispstat_buffer *buf)
    160{
    161	if (ISP_STAT_USES_DMAENGINE(stat))
    162		return;
    163
    164	dma_sync_sg_for_device(stat->isp->dev, buf->sgt.sgl,
    165			       buf->sgt.nents, DMA_FROM_DEVICE);
    166}
    167
    168static void isp_stat_buf_sync_for_cpu(struct ispstat *stat,
    169				      struct ispstat_buffer *buf)
    170{
    171	if (ISP_STAT_USES_DMAENGINE(stat))
    172		return;
    173
    174	dma_sync_sg_for_cpu(stat->isp->dev, buf->sgt.sgl,
    175			    buf->sgt.nents, DMA_FROM_DEVICE);
    176}
    177
    178static void isp_stat_buf_clear(struct ispstat *stat)
    179{
    180	int i;
    181
    182	for (i = 0; i < STAT_MAX_BUFS; i++)
    183		stat->buf[i].empty = 1;
    184}
    185
    186static struct ispstat_buffer *
    187__isp_stat_buf_find(struct ispstat *stat, int look_empty)
    188{
    189	struct ispstat_buffer *found = NULL;
    190	int i;
    191
    192	for (i = 0; i < STAT_MAX_BUFS; i++) {
    193		struct ispstat_buffer *curr = &stat->buf[i];
    194
    195		/*
    196		 * Don't select the buffer which is being copied to
    197		 * userspace or used by the module.
    198		 */
    199		if (curr == stat->locked_buf || curr == stat->active_buf)
    200			continue;
    201
    202		/* Don't select uninitialised buffers if it's not required */
    203		if (!look_empty && curr->empty)
    204			continue;
    205
    206		/* Pick uninitialised buffer over anything else if look_empty */
    207		if (curr->empty) {
    208			found = curr;
    209			break;
    210		}
    211
     212		/* Choose the oldest buffer; s32 diff handles wrap-around */
    213		if (!found ||
    214		    (s32)curr->frame_number - (s32)found->frame_number < 0)
    215			found = curr;
    216	}
    217
    218	return found;
    219}
    220
    221static inline struct ispstat_buffer *
    222isp_stat_buf_find_oldest(struct ispstat *stat)
    223{
    224	return __isp_stat_buf_find(stat, 0);
    225}
    226
    227static inline struct ispstat_buffer *
    228isp_stat_buf_find_oldest_or_empty(struct ispstat *stat)
    229{
    230	return __isp_stat_buf_find(stat, 1);
    231}
    232
    233static int isp_stat_buf_queue(struct ispstat *stat)
    234{
    235	if (!stat->active_buf)
    236		return STAT_NO_BUF;
    237
    238	ktime_get_ts64(&stat->active_buf->ts);
    239
    240	stat->active_buf->buf_size = stat->buf_size;
    241	if (isp_stat_buf_check_magic(stat, stat->active_buf)) {
    242		dev_dbg(stat->isp->dev, "%s: data wasn't properly written.\n",
    243			stat->subdev.name);
    244		return STAT_NO_BUF;
    245	}
    246	stat->active_buf->config_counter = stat->config_counter;
    247	stat->active_buf->frame_number = stat->frame_number;
    248	stat->active_buf->empty = 0;
    249	stat->active_buf = NULL;
    250
    251	return STAT_BUF_DONE;
    252}
    253
    254/* Get next free buffer to write the statistics to and mark it active. */
    255static void isp_stat_buf_next(struct ispstat *stat)
    256{
    257	if (unlikely(stat->active_buf))
    258		/* Overwriting unused active buffer */
    259		dev_dbg(stat->isp->dev,
    260			"%s: new buffer requested without queuing active one.\n",
    261			stat->subdev.name);
    262	else
    263		stat->active_buf = isp_stat_buf_find_oldest_or_empty(stat);
    264}
    265
    266static void isp_stat_buf_release(struct ispstat *stat)
    267{
    268	unsigned long flags;
    269
    270	isp_stat_buf_sync_for_device(stat, stat->locked_buf);
    271	spin_lock_irqsave(&stat->isp->stat_lock, flags);
    272	stat->locked_buf = NULL;
    273	spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
    274}
    275
     276/* Get a buffer and copy its contents to userspace. */
    277static struct ispstat_buffer *isp_stat_buf_get(struct ispstat *stat,
    278					       struct omap3isp_stat_data *data)
    279{
    280	int rval = 0;
    281	unsigned long flags;
    282	struct ispstat_buffer *buf;
    283
    284	spin_lock_irqsave(&stat->isp->stat_lock, flags);
    285
    286	while (1) {
    287		buf = isp_stat_buf_find_oldest(stat);
    288		if (!buf) {
    289			spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
    290			dev_dbg(stat->isp->dev, "%s: cannot find a buffer.\n",
    291				stat->subdev.name);
    292			return ERR_PTR(-EBUSY);
    293		}
    294		if (isp_stat_buf_check_magic(stat, buf)) {
    295			dev_dbg(stat->isp->dev,
     296				"%s: current buffer has corrupted data.\n",
    297				stat->subdev.name);
    298			/* Mark empty because it doesn't have valid data. */
    299			buf->empty = 1;
    300		} else {
    301			/* Buffer isn't corrupted. */
    302			break;
    303		}
    304	}
    305
    306	stat->locked_buf = buf;
    307
    308	spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
    309
    310	if (buf->buf_size > data->buf_size) {
    311		dev_warn(stat->isp->dev,
    312			 "%s: userspace's buffer size is not enough.\n",
    313			 stat->subdev.name);
    314		isp_stat_buf_release(stat);
    315		return ERR_PTR(-EINVAL);
    316	}
    317
    318	isp_stat_buf_sync_for_cpu(stat, buf);
    319
    320	rval = copy_to_user(data->buf,
    321			    buf->virt_addr,
    322			    buf->buf_size);
    323
    324	if (rval) {
    325		dev_info(stat->isp->dev,
    326			 "%s: failed copying %d bytes of stat data\n",
    327			 stat->subdev.name, rval);
    328		buf = ERR_PTR(-EFAULT);
    329		isp_stat_buf_release(stat);
    330	}
    331
    332	return buf;
    333}
    334
    335static void isp_stat_bufs_free(struct ispstat *stat)
    336{
    337	struct device *dev = ISP_STAT_USES_DMAENGINE(stat)
    338			   ? NULL : stat->isp->dev;
    339	unsigned int i;
    340
    341	for (i = 0; i < STAT_MAX_BUFS; i++) {
    342		struct ispstat_buffer *buf = &stat->buf[i];
    343
    344		if (!buf->virt_addr)
    345			continue;
    346
    347		sg_free_table(&buf->sgt);
    348
    349		dma_free_coherent(dev, stat->buf_alloc_size, buf->virt_addr,
    350				  buf->dma_addr);
    351
    352		buf->dma_addr = 0;
    353		buf->virt_addr = NULL;
    354		buf->empty = 1;
    355	}
    356
    357	dev_dbg(stat->isp->dev, "%s: all buffers were freed.\n",
    358		stat->subdev.name);
    359
    360	stat->buf_alloc_size = 0;
    361	stat->active_buf = NULL;
    362}
    363
    364static int isp_stat_bufs_alloc_one(struct device *dev,
    365				   struct ispstat_buffer *buf,
    366				   unsigned int size)
    367{
    368	int ret;
    369
    370	buf->virt_addr = dma_alloc_coherent(dev, size, &buf->dma_addr,
    371					    GFP_KERNEL);
    372	if (!buf->virt_addr)
    373		return -ENOMEM;
    374
    375	ret = dma_get_sgtable(dev, &buf->sgt, buf->virt_addr, buf->dma_addr,
    376			      size);
    377	if (ret < 0) {
    378		dma_free_coherent(dev, size, buf->virt_addr, buf->dma_addr);
    379		buf->virt_addr = NULL;
    380		buf->dma_addr = 0;
    381		return ret;
    382	}
    383
    384	return 0;
    385}
    386
     387/*
     388 * The device passed to the DMA API depends on whether the statistics block uses
     389 * ISP DMA, external DMA or PIO to transfer data.
     390 *
     391 * The first case (for the AEWB and AF engines) passes the ISP device, resulting
     392 * in the DMA buffers being mapped through the ISP IOMMU.
     393 *
     394 * The second case (for the histogram engine using external DMA) should pass the
     395 * DMA engine device. As that device isn't accessible through the OMAP DMA
     396 * engine API the driver passes NULL instead, resulting in the buffers being
     397 * mapped directly as physical pages.
     398 *
     399 * The third case (for the histogram engine using PIO) doesn't require any
     400 * mapping. The buffers could be allocated with kmalloc/vmalloc, but we still
     401 * use dma_alloc_coherent() for consistency purposes.
     402 */
    403static int isp_stat_bufs_alloc(struct ispstat *stat, u32 size)
    404{
    405	struct device *dev = ISP_STAT_USES_DMAENGINE(stat)
    406			   ? NULL : stat->isp->dev;
    407	unsigned long flags;
    408	unsigned int i;
    409
    410	spin_lock_irqsave(&stat->isp->stat_lock, flags);
    411
    412	BUG_ON(stat->locked_buf != NULL);
    413
    414	/* Are the old buffers big enough? */
    415	if (stat->buf_alloc_size >= size) {
    416		spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
    417		return 0;
    418	}
    419
    420	if (stat->state != ISPSTAT_DISABLED || stat->buf_processing) {
    421		dev_info(stat->isp->dev,
    422			 "%s: trying to allocate memory when busy\n",
    423			 stat->subdev.name);
    424		spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
    425		return -EBUSY;
    426	}
    427
    428	spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
    429
    430	isp_stat_bufs_free(stat);
    431
    432	stat->buf_alloc_size = size;
    433
    434	for (i = 0; i < STAT_MAX_BUFS; i++) {
    435		struct ispstat_buffer *buf = &stat->buf[i];
    436		int ret;
    437
    438		ret = isp_stat_bufs_alloc_one(dev, buf, size);
    439		if (ret < 0) {
    440			dev_err(stat->isp->dev,
    441				"%s: Failed to allocate DMA buffer %u\n",
    442				stat->subdev.name, i);
    443			isp_stat_bufs_free(stat);
    444			return ret;
    445		}
    446
    447		buf->empty = 1;
    448
    449		dev_dbg(stat->isp->dev,
    450			"%s: buffer[%u] allocated. dma=%pad virt=%p",
    451			stat->subdev.name, i, &buf->dma_addr, buf->virt_addr);
    452	}
    453
    454	return 0;
    455}
    456
    457static void isp_stat_queue_event(struct ispstat *stat, int err)
    458{
    459	struct video_device *vdev = stat->subdev.devnode;
    460	struct v4l2_event event;
    461	struct omap3isp_stat_event_status *status = (void *)event.u.data;
    462
    463	memset(&event, 0, sizeof(event));
    464	if (!err) {
    465		status->frame_number = stat->frame_number;
    466		status->config_counter = stat->config_counter;
    467	} else {
    468		status->buf_err = 1;
    469	}
    470	event.type = stat->event_type;
    471	v4l2_event_queue(vdev, &event);
    472}
    473
    474
    475/*
    476 * omap3isp_stat_request_statistics - Request statistics.
    477 * @data: Pointer to return statistics data.
    478 *
    479 * Returns 0 if successful.
    480 */
    481int omap3isp_stat_request_statistics(struct ispstat *stat,
    482				     struct omap3isp_stat_data *data)
    483{
    484	struct ispstat_buffer *buf;
    485
    486	if (stat->state != ISPSTAT_ENABLED) {
    487		dev_dbg(stat->isp->dev, "%s: engine not enabled.\n",
    488			stat->subdev.name);
    489		return -EINVAL;
    490	}
    491
    492	mutex_lock(&stat->ioctl_lock);
    493	buf = isp_stat_buf_get(stat, data);
    494	if (IS_ERR(buf)) {
    495		mutex_unlock(&stat->ioctl_lock);
    496		return PTR_ERR(buf);
    497	}
    498
    499	data->ts.tv_sec = buf->ts.tv_sec;
    500	data->ts.tv_usec = buf->ts.tv_nsec / NSEC_PER_USEC;
    501	data->config_counter = buf->config_counter;
    502	data->frame_number = buf->frame_number;
    503	data->buf_size = buf->buf_size;
    504
    505	buf->empty = 1;
    506	isp_stat_buf_release(stat);
    507	mutex_unlock(&stat->ioctl_lock);
    508
    509	return 0;
    510}
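
/*
 * Minimal userspace sketch of the flow that ends in this function, issued on
 * the AEWB statistics subdev node (illustrative only: it assumes the uapi
 * definitions from <linux/omap3isp.h>, fd and stats_buf are hypothetical, and
 * error handling plus pipeline setup are omitted):
 *
 *	struct omap3isp_h3a_aewb_config cfg = { ... };
 *	struct v4l2_event_subscription sub = {
 *		.type = V4L2_EVENT_OMAP3ISP_AEWB,
 *	};
 *	struct v4l2_event ev;
 *	struct omap3isp_stat_data data = {
 *		.buf = stats_buf, .buf_size = sizeof(stats_buf),
 *	};
 *	unsigned long enable = 1;
 *
 *	ioctl(fd, VIDIOC_OMAP3ISP_AEWB_CFG, &cfg);   // omap3isp_stat_config()
 *	ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub);
 *	ioctl(fd, VIDIOC_OMAP3ISP_STAT_EN, &enable); // omap3isp_stat_enable()
 *	// poll() for an event, then:
 *	ioctl(fd, VIDIOC_DQEVENT, &ev);
 *	ioctl(fd, VIDIOC_OMAP3ISP_STAT_REQ, &data);  // this function
 */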
    511
    512int omap3isp_stat_request_statistics_time32(struct ispstat *stat,
    513					struct omap3isp_stat_data_time32 *data)
    514{
    515	struct omap3isp_stat_data data64 = { };
    516	int ret;
    517
    518	ret = omap3isp_stat_request_statistics(stat, &data64);
    519	if (ret)
    520		return ret;
    521
    522	data->ts.tv_sec = data64.ts.tv_sec;
    523	data->ts.tv_usec = data64.ts.tv_usec;
    524	data->buf = (uintptr_t)data64.buf;
    525	memcpy(&data->frame, &data64.frame, sizeof(data->frame));
    526
    527	return 0;
    528}
    529
    530/*
    531 * omap3isp_stat_config - Receives new statistic engine configuration.
    532 * @new_conf: Pointer to config structure.
    533 *
     534 * Returns 0 if successful, -EINVAL if the new_conf pointer is NULL, -ENOMEM if
     535 * it could not allocate memory for the buffer, or other errors if parameters
    536 * are invalid.
    537 */
    538int omap3isp_stat_config(struct ispstat *stat, void *new_conf)
    539{
    540	int ret;
    541	unsigned long irqflags;
    542	struct ispstat_generic_config *user_cfg = new_conf;
    543	u32 buf_size = user_cfg->buf_size;
    544
    545	mutex_lock(&stat->ioctl_lock);
    546
    547	dev_dbg(stat->isp->dev,
    548		"%s: configuring module with buffer size=0x%08lx\n",
    549		stat->subdev.name, (unsigned long)buf_size);
    550
    551	ret = stat->ops->validate_params(stat, new_conf);
    552	if (ret) {
    553		mutex_unlock(&stat->ioctl_lock);
    554		dev_dbg(stat->isp->dev, "%s: configuration values are invalid.\n",
    555			stat->subdev.name);
    556		return ret;
    557	}
    558
    559	if (buf_size != user_cfg->buf_size)
    560		dev_dbg(stat->isp->dev,
    561			"%s: driver has corrected buffer size request to 0x%08lx\n",
    562			stat->subdev.name,
    563			(unsigned long)user_cfg->buf_size);
    564
     565	/*
     566	 * Hack: H3A modules may need a doubled buffer size to avoid access
     567	 * to an invalid memory address after an SBL overflow.
     568	 * The buffer size is always PAGE_ALIGNED.
     569	 * Hack 2: MAGIC_SIZE is added to buf_size so a magic word can be
     570	 * inserted at the end for data integrity checking.
     571	 * Hack 3: The AF module writes one paxel of data more than it should,
     572	 * so the buffer allocation must account for it to avoid invalid
     573	 * memory access.
     574	 * Hack 4: H3A needs to allocate extra space for the recover state.
     575	 */
    576	if (IS_H3A(stat)) {
    577		buf_size = user_cfg->buf_size * 2 + MAGIC_SIZE;
    578		if (IS_H3A_AF(stat))
    579			/*
    580			 * Adding one extra paxel data size for each recover
    581			 * buffer + 2 regular ones.
    582			 */
    583			buf_size += AF_EXTRA_DATA * (NUM_H3A_RECOVER_BUFS + 2);
    584		if (stat->recover_priv) {
    585			struct ispstat_generic_config *recover_cfg =
    586				stat->recover_priv;
    587			buf_size += recover_cfg->buf_size *
    588				    NUM_H3A_RECOVER_BUFS;
    589		}
    590		buf_size = PAGE_ALIGN(buf_size);
    591	} else { /* Histogram */
    592		buf_size = PAGE_ALIGN(user_cfg->buf_size + MAGIC_SIZE);
    593	}
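
	/*
	 * Worked sizing example with illustrative numbers (assuming 4 KiB
	 * pages and OMAP3ISP_AF_PAXEL_SIZE == 48, neither of which is defined
	 * in this file): an AF request with user_cfg->buf_size = 4096 and no
	 * recover configuration gives
	 *
	 *	buf_size = 4096 * 2 + 16                    =  8208
	 *	buf_size += 48 * (NUM_H3A_RECOVER_BUFS + 2) =  8784
	 *	buf_size = PAGE_ALIGN(8784)                 = 12288
	 *
	 * while a histogram request of 4096 bytes simply becomes
	 * PAGE_ALIGN(4096 + 16) = 8192.
	 */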
    594
    595	ret = isp_stat_bufs_alloc(stat, buf_size);
    596	if (ret) {
    597		mutex_unlock(&stat->ioctl_lock);
    598		return ret;
    599	}
    600
    601	spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
    602	stat->ops->set_params(stat, new_conf);
    603	spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
    604
    605	/*
    606	 * Returning the right future config_counter for this setup, so
    607	 * userspace can *know* when it has been applied.
    608	 */
    609	user_cfg->config_counter = stat->config_counter + stat->inc_config;
    610
    611	/* Module has a valid configuration. */
    612	stat->configured = 1;
    613	dev_dbg(stat->isp->dev,
    614		"%s: module has been successfully configured.\n",
    615		stat->subdev.name);
    616
    617	mutex_unlock(&stat->ioctl_lock);
    618
    619	return 0;
    620}
    621
     622/*
     623 * isp_stat_buf_process - Process statistic buffers.
     624 * @buf_state: indicates whether the buffer is ready to be processed. It's
     625 *	       necessary because the histogram needs to copy the data from
     626 *	       internal memory before being able to process the buffer.
     627 */
    628static int isp_stat_buf_process(struct ispstat *stat, int buf_state)
    629{
    630	int ret = STAT_NO_BUF;
    631
    632	if (!atomic_add_unless(&stat->buf_err, -1, 0) &&
    633	    buf_state == STAT_BUF_DONE && stat->state == ISPSTAT_ENABLED) {
    634		ret = isp_stat_buf_queue(stat);
    635		isp_stat_buf_next(stat);
    636	}
    637
    638	return ret;
    639}
    640
    641int omap3isp_stat_pcr_busy(struct ispstat *stat)
    642{
    643	return stat->ops->busy(stat);
    644}
    645
    646int omap3isp_stat_busy(struct ispstat *stat)
    647{
    648	return omap3isp_stat_pcr_busy(stat) | stat->buf_processing |
    649		(stat->state != ISPSTAT_DISABLED);
    650}
    651
    652/*
    653 * isp_stat_pcr_enable - Disables/Enables statistic engines.
    654 * @pcr_enable: 0/1 - Disables/Enables the engine.
    655 *
    656 * Must be called from ISP driver when the module is idle and synchronized
    657 * with CCDC.
    658 */
    659static void isp_stat_pcr_enable(struct ispstat *stat, u8 pcr_enable)
    660{
    661	if ((stat->state != ISPSTAT_ENABLING &&
    662	     stat->state != ISPSTAT_ENABLED) && pcr_enable)
    663		/* Userspace has disabled the module. Aborting. */
    664		return;
    665
    666	stat->ops->enable(stat, pcr_enable);
    667	if (stat->state == ISPSTAT_DISABLING && !pcr_enable)
    668		stat->state = ISPSTAT_DISABLED;
    669	else if (stat->state == ISPSTAT_ENABLING && pcr_enable)
    670		stat->state = ISPSTAT_ENABLED;
    671}
    672
    673void omap3isp_stat_suspend(struct ispstat *stat)
    674{
    675	unsigned long flags;
    676
    677	spin_lock_irqsave(&stat->isp->stat_lock, flags);
    678
    679	if (stat->state != ISPSTAT_DISABLED)
    680		stat->ops->enable(stat, 0);
    681	if (stat->state == ISPSTAT_ENABLED)
    682		stat->state = ISPSTAT_SUSPENDED;
    683
    684	spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
    685}
    686
    687void omap3isp_stat_resume(struct ispstat *stat)
    688{
    689	/* Module will be re-enabled with its pipeline */
    690	if (stat->state == ISPSTAT_SUSPENDED)
    691		stat->state = ISPSTAT_ENABLING;
    692}
    693
    694static void isp_stat_try_enable(struct ispstat *stat)
    695{
    696	unsigned long irqflags;
    697
    698	if (stat->priv == NULL)
    699		/* driver wasn't initialised */
    700		return;
    701
    702	spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
    703	if (stat->state == ISPSTAT_ENABLING && !stat->buf_processing &&
    704	    stat->buf_alloc_size) {
     705		/*
     706		 * Userspace requested to enable the engine but it hasn't been
     707		 * enabled yet. Let's do that now.
     708		 */
    709		stat->update = 1;
    710		isp_stat_buf_next(stat);
    711		stat->ops->setup_regs(stat, stat->priv);
    712		isp_stat_buf_insert_magic(stat, stat->active_buf);
    713
     714		/*
     715		 * The H3A module has some hw issues which force the driver to
     716		 * ignore the next buffers even if it was disabled in the
     717		 * meantime. On the other hand, the histogram shouldn't ignore
     718		 * buffers anymore if it's being enabled.
     719		 */
    720		if (!IS_H3A(stat))
    721			atomic_set(&stat->buf_err, 0);
    722
    723		isp_stat_pcr_enable(stat, 1);
    724		spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
    725		dev_dbg(stat->isp->dev, "%s: module is enabled.\n",
    726			stat->subdev.name);
    727	} else {
    728		spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
    729	}
    730}
    731
    732void omap3isp_stat_isr_frame_sync(struct ispstat *stat)
    733{
    734	isp_stat_try_enable(stat);
    735}
    736
    737void omap3isp_stat_sbl_overflow(struct ispstat *stat)
    738{
    739	unsigned long irqflags;
    740
    741	spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
     742	/*
     743	 * Due to an H3A hw issue which prevents the next buffer from starting
     744	 * at the correct memory address, 2 buffers must be ignored.
     745	 */
    746	atomic_set(&stat->buf_err, 2);
    747
     748	/*
     749	 * If more than one SBL overflow happens in a row, the H3A module may
     750	 * access an invalid memory region.
     751	 * stat->sbl_ovl_recover is set to tell the driver to temporarily use
     752	 * a soft configuration which helps to avoid consecutive overflows.
     753	 */
    754	if (stat->recover_priv)
    755		stat->sbl_ovl_recover = 1;
    756	spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
    757}
    758
    759/*
    760 * omap3isp_stat_enable - Disable/Enable statistic engine as soon as possible
    761 * @enable: 0/1 - Disables/Enables the engine.
    762 *
    763 * Client should configure all the module registers before this.
    764 * This function can be called from a userspace request.
    765 */
    766int omap3isp_stat_enable(struct ispstat *stat, u8 enable)
    767{
    768	unsigned long irqflags;
    769
    770	dev_dbg(stat->isp->dev, "%s: user wants to %s module.\n",
    771		stat->subdev.name, enable ? "enable" : "disable");
    772
    773	/* Prevent enabling while configuring */
    774	mutex_lock(&stat->ioctl_lock);
    775
    776	spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
    777
    778	if (!stat->configured && enable) {
    779		spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
    780		mutex_unlock(&stat->ioctl_lock);
    781		dev_dbg(stat->isp->dev,
    782			"%s: cannot enable module as it's never been successfully configured so far.\n",
    783			stat->subdev.name);
    784		return -EINVAL;
    785	}
    786
    787	if (enable) {
    788		if (stat->state == ISPSTAT_DISABLING)
    789			/* Previous disabling request wasn't done yet */
    790			stat->state = ISPSTAT_ENABLED;
    791		else if (stat->state == ISPSTAT_DISABLED)
    792			/* Module is now being enabled */
    793			stat->state = ISPSTAT_ENABLING;
    794	} else {
    795		if (stat->state == ISPSTAT_ENABLING) {
    796			/* Previous enabling request wasn't done yet */
    797			stat->state = ISPSTAT_DISABLED;
    798		} else if (stat->state == ISPSTAT_ENABLED) {
    799			/* Module is now being disabled */
    800			stat->state = ISPSTAT_DISABLING;
    801			isp_stat_buf_clear(stat);
    802		}
    803	}
    804
    805	spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
    806	mutex_unlock(&stat->ioctl_lock);
    807
    808	return 0;
    809}
    810
    811int omap3isp_stat_s_stream(struct v4l2_subdev *subdev, int enable)
    812{
    813	struct ispstat *stat = v4l2_get_subdevdata(subdev);
    814
    815	if (enable) {
     816		/*
     817		 * Only set the PCR enable bit if the module was previously
     818		 * enabled through an ioctl.
     819		 */
    820		isp_stat_try_enable(stat);
    821	} else {
    822		unsigned long flags;
    823		/* Disable PCR bit and config enable field */
    824		omap3isp_stat_enable(stat, 0);
    825		spin_lock_irqsave(&stat->isp->stat_lock, flags);
    826		stat->ops->enable(stat, 0);
    827		spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
    828
     829		/*
     830		 * If the module isn't busy, a new interrupt may or may not
     831		 * come to set the state to DISABLED. As the histogram needs to
     832		 * read its internal memory to clear it, let the interrupt
     833		 * handler be responsible for changing the state to DISABLED.
     834		 * If the last interrupt is still on its way, that's safe, as
     835		 * the handler will ignore the second call once the state is
     836		 * already DISABLED. Calling it here is necessary to synchronize
     837		 * the histogram with streamoff, since the module may otherwise
     838		 * be considered idle before the last SDMA transfer starts.
     839		 */
    840		if (!omap3isp_stat_pcr_busy(stat))
    841			omap3isp_stat_isr(stat);
    842
    843		dev_dbg(stat->isp->dev, "%s: module is being disabled\n",
    844			stat->subdev.name);
    845	}
    846
    847	return 0;
    848}
    849
    850/*
    851 * __stat_isr - Interrupt handler for statistic drivers
    852 */
    853static void __stat_isr(struct ispstat *stat, int from_dma)
    854{
    855	int ret = STAT_BUF_DONE;
    856	int buf_processing;
    857	unsigned long irqflags;
    858	struct isp_pipeline *pipe;
    859
     860	/*
     861	 * stat->buf_processing must be set before disabling the module. It's
     862	 * necessary to avoid reporting too early that the buffers aren't busy
     863	 * in case SDMA is going to be used.
     864	 */
    865	spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
    866	if (stat->state == ISPSTAT_DISABLED) {
    867		spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
    868		return;
    869	}
    870	buf_processing = stat->buf_processing;
    871	stat->buf_processing = 1;
    872	stat->ops->enable(stat, 0);
    873
    874	if (buf_processing && !from_dma) {
    875		if (stat->state == ISPSTAT_ENABLED) {
    876			spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
    877			dev_err(stat->isp->dev,
    878				"%s: interrupt occurred when module was still processing a buffer.\n",
    879				stat->subdev.name);
    880			ret = STAT_NO_BUF;
    881			goto out;
    882		} else {
     883			/*
     884			 * The interrupt handler was called from streamoff when
     885			 * the module wasn't busy anymore, to ensure it is
     886			 * disabled after processing the last buffer. If such
     887			 * buffer processing has already started, there is no
     888			 * need to do anything else.
     889			 */
    890			spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
    891			return;
    892		}
    893	}
    894	spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
    895
    896	/* If it's busy we can't process this buffer anymore */
    897	if (!omap3isp_stat_pcr_busy(stat)) {
    898		if (!from_dma && stat->ops->buf_process)
     899			/* Module still needs to copy data to the buffer. */
    900			ret = stat->ops->buf_process(stat);
    901		if (ret == STAT_BUF_WAITING_DMA)
    902			/* Buffer is not ready yet */
    903			return;
    904
    905		spin_lock_irqsave(&stat->isp->stat_lock, irqflags);
    906
     907		/*
     908		 * The histogram needs to read its internal memory to clear it
     909		 * before being disabled. For that reason, the common statistic
     910		 * layer can return only after calling stat's buf_process().
     911		 */
    912		if (stat->state == ISPSTAT_DISABLING) {
    913			stat->state = ISPSTAT_DISABLED;
    914			spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
    915			stat->buf_processing = 0;
    916			return;
    917		}
    918		pipe = to_isp_pipeline(&stat->subdev.entity);
    919		stat->frame_number = atomic_read(&pipe->frame_number);
    920
    921		/*
    922		 * Before this point, 'ret' stores the buffer's status if it's
    923		 * ready to be processed. Afterwards, it holds the status if
    924		 * it was processed successfully.
    925		 */
    926		ret = isp_stat_buf_process(stat, ret);
    927
    928		if (likely(!stat->sbl_ovl_recover)) {
    929			stat->ops->setup_regs(stat, stat->priv);
    930		} else {
     931			/*
     932			 * Use the recover config to increase the chance of a
     933			 * good buffer processing and to bring the H3A module
     934			 * back to a valid state.
     935			 */
    936			stat->update = 1;
    937			stat->ops->setup_regs(stat, stat->recover_priv);
    938			stat->sbl_ovl_recover = 0;
    939
     940			/*
     941			 * Set 'update' so the module goes back to its regular
     942			 * configuration after the next buffer.
     943			 */
    944			stat->update = 1;
    945		}
    946
    947		isp_stat_buf_insert_magic(stat, stat->active_buf);
    948
     949		/*
     950		 * Hack: H3A modules may access an invalid memory address or
     951		 * send corrupted data to userspace if more than 1 SBL overflow
     952		 * happens in a row without its buffer's start memory address
     953		 * being re-written in the meantime. Such a situation is avoided
     954		 * if the module is not immediately re-enabled when the ISR
     955		 * misses the timing to process the buffer and to set up the
     956		 * registers. Because of that, pcr_enable(1) was moved inside
     957		 * this 'if' block. But the next interrupt will still happen,
     958		 * as the module was busy during pcr_enable(0).
     959		 */
    960		isp_stat_pcr_enable(stat, 1);
    961		spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
    962	} else {
     963		/*
     964		 * If an SBL overflow occurs and the H3A driver misses the
     965		 * timing to process the buffer, stat->buf_err is set and won't
     966		 * be cleared now, so the next buffer will be correctly ignored.
     967		 * It's necessary due to a hw issue which makes the next H3A
     968		 * buffer start from the memory address where the previous one
     969		 * stopped, instead of where it was configured to.
     970		 * Do not "stat->buf_err = 0" here.
     971		 */
    972
    973		if (stat->ops->buf_process)
     974			/*
     975			 * The driver may need to erase the current data prior
     976			 * to processing a new buffer. If it misses the timing,
     977			 * the next buffer might be wrong and should be ignored.
     978			 * This happens only for the histogram.
     979			 */
    980			atomic_set(&stat->buf_err, 1);
    981
    982		ret = STAT_NO_BUF;
    983		dev_dbg(stat->isp->dev,
    984			"%s: cannot process buffer, device is busy.\n",
    985			stat->subdev.name);
    986	}
    987
    988out:
    989	stat->buf_processing = 0;
    990	isp_stat_queue_event(stat, ret != STAT_BUF_DONE);
    991}
    992
    993void omap3isp_stat_isr(struct ispstat *stat)
    994{
    995	__stat_isr(stat, 0);
    996}
    997
    998void omap3isp_stat_dma_isr(struct ispstat *stat)
    999{
   1000	__stat_isr(stat, 1);
   1001}
   1002
   1003int omap3isp_stat_subscribe_event(struct v4l2_subdev *subdev,
   1004				  struct v4l2_fh *fh,
   1005				  struct v4l2_event_subscription *sub)
   1006{
   1007	struct ispstat *stat = v4l2_get_subdevdata(subdev);
   1008
   1009	if (sub->type != stat->event_type)
   1010		return -EINVAL;
   1011
   1012	return v4l2_event_subscribe(fh, sub, STAT_NEVENTS, NULL);
   1013}
   1014
   1015int omap3isp_stat_unsubscribe_event(struct v4l2_subdev *subdev,
   1016				    struct v4l2_fh *fh,
   1017				    struct v4l2_event_subscription *sub)
   1018{
   1019	return v4l2_event_unsubscribe(fh, sub);
   1020}
   1021
   1022void omap3isp_stat_unregister_entities(struct ispstat *stat)
   1023{
   1024	v4l2_device_unregister_subdev(&stat->subdev);
   1025}
   1026
   1027int omap3isp_stat_register_entities(struct ispstat *stat,
   1028				    struct v4l2_device *vdev)
   1029{
   1030	stat->subdev.dev = vdev->mdev->dev;
   1031
   1032	return v4l2_device_register_subdev(vdev, &stat->subdev);
   1033}
   1034
   1035static int isp_stat_init_entities(struct ispstat *stat, const char *name,
   1036				  const struct v4l2_subdev_ops *sd_ops)
   1037{
   1038	struct v4l2_subdev *subdev = &stat->subdev;
   1039	struct media_entity *me = &subdev->entity;
   1040
   1041	v4l2_subdev_init(subdev, sd_ops);
   1042	snprintf(subdev->name, V4L2_SUBDEV_NAME_SIZE, "OMAP3 ISP %s", name);
   1043	subdev->grp_id = BIT(16);	/* group ID for isp subdevs */
   1044	subdev->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
   1045	v4l2_set_subdevdata(subdev, stat);
   1046
   1047	stat->pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
   1048	me->ops = NULL;
   1049
   1050	return media_entity_pads_init(me, 1, &stat->pad);
   1051}
   1052
   1053int omap3isp_stat_init(struct ispstat *stat, const char *name,
   1054		       const struct v4l2_subdev_ops *sd_ops)
   1055{
   1056	int ret;
   1057
   1058	stat->buf = kcalloc(STAT_MAX_BUFS, sizeof(*stat->buf), GFP_KERNEL);
   1059	if (!stat->buf)
   1060		return -ENOMEM;
   1061
   1062	isp_stat_buf_clear(stat);
   1063	mutex_init(&stat->ioctl_lock);
   1064	atomic_set(&stat->buf_err, 0);
   1065
   1066	ret = isp_stat_init_entities(stat, name, sd_ops);
   1067	if (ret < 0) {
   1068		mutex_destroy(&stat->ioctl_lock);
   1069		kfree(stat->buf);
   1070	}
   1071
   1072	return ret;
   1073}
   1074
   1075void omap3isp_stat_cleanup(struct ispstat *stat)
   1076{
   1077	media_entity_cleanup(&stat->subdev.entity);
   1078	mutex_destroy(&stat->ioctl_lock);
   1079	isp_stat_bufs_free(stat);
   1080	kfree(stat->buf);
   1081	kfree(stat->priv);
   1082	kfree(stat->recover_priv);
   1083}