// SPDX-License-Identifier: GPL-2.0-only
/*
 * isphist.c
 *
 * TI OMAP3 ISP - Histogram module
 *
 * Copyright (C) 2010 Nokia Corporation
 * Copyright (C) 2009 Texas Instruments, Inc.
 *
 * Contacts: David Cohen <dacohen@gmail.com>
 *	     Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *	     Sakari Ailus <sakari.ailus@iki.fi>
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include "isp.h"
#include "ispreg.h"
#include "isphist.h"

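/*
 * When set, try to read the histogram memory out through the DMA engine;
 * hist_buf_process() falls back to PIO when no DMA channel is available.
 */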
#define HIST_CONFIG_DMA	1

/*
 * hist_reset_mem - Clear the histogram memory before starting the stats engine.
 */
static void hist_reset_mem(struct ispstat *hist)
{
	struct isp_device *isp = hist->isp;
	struct omap3isp_hist_config *conf = hist->priv;
	unsigned int i;

	isp_reg_writel(isp, 0, OMAP3_ISP_IOMEM_HIST, ISPHIST_ADDR);

	/*
	 * While this bit is set, the histogram internal buffer is cleared at
	 * the same time it is read. The bit must be cleared afterwards.
	 */
	isp_reg_set(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT, ISPHIST_CNT_CLEAR);

	/*
	 * Clear 4 words per iteration as an optimization: it avoids 3/4 of
	 * the jumps. OMAP3ISP_HIST_MEM_SIZE is known to be divisible by 4.
	 */
	for (i = OMAP3ISP_HIST_MEM_SIZE / 4; i > 0; i--) {
		isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
		isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
		isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
		isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
	}
	isp_reg_clr(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT, ISPHIST_CNT_CLEAR);

	hist->wait_acc_frames = conf->num_acc_frames;
}

/*
 * hist_setup_regs - Helper function to update Histogram registers.
 */
static void hist_setup_regs(struct ispstat *hist, void *priv)
{
	struct isp_device *isp = hist->isp;
	struct omap3isp_hist_config *conf = priv;
	int c;
	u32 cnt;
	u32 wb_gain;
	u32 reg_hor[OMAP3ISP_HIST_MAX_REGIONS];
	u32 reg_ver[OMAP3ISP_HIST_MAX_REGIONS];

	if (!hist->update || hist->state == ISPSTAT_DISABLED ||
	    hist->state == ISPSTAT_DISABLING)
		return;

	cnt = conf->cfa << ISPHIST_CNT_CFA_SHIFT;

	wb_gain = conf->wg[0] << ISPHIST_WB_GAIN_WG00_SHIFT;
	wb_gain |= conf->wg[1] << ISPHIST_WB_GAIN_WG01_SHIFT;
	wb_gain |= conf->wg[2] << ISPHIST_WB_GAIN_WG02_SHIFT;
	if (conf->cfa == OMAP3ISP_HIST_CFA_BAYER)
		wb_gain |= conf->wg[3] << ISPHIST_WB_GAIN_WG03_SHIFT;

	/* Region sizes and positions */
	for (c = 0; c < OMAP3ISP_HIST_MAX_REGIONS; c++) {
		if (c < conf->num_regions) {
			reg_hor[c] = (conf->region[c].h_start <<
				     ISPHIST_REG_START_SHIFT)
				   | (conf->region[c].h_end <<
				     ISPHIST_REG_END_SHIFT);
			reg_ver[c] = (conf->region[c].v_start <<
				     ISPHIST_REG_START_SHIFT)
				   | (conf->region[c].v_end <<
				     ISPHIST_REG_END_SHIFT);
		} else {
			reg_hor[c] = 0;
			reg_ver[c] = 0;
		}
	}

	cnt |= conf->hist_bins << ISPHIST_CNT_BINS_SHIFT;
	switch (conf->hist_bins) {
	case OMAP3ISP_HIST_BINS_256:
		cnt |= (ISPHIST_IN_BIT_WIDTH_CCDC - 8) <<
			ISPHIST_CNT_SHIFT_SHIFT;
		break;
	case OMAP3ISP_HIST_BINS_128:
		cnt |= (ISPHIST_IN_BIT_WIDTH_CCDC - 7) <<
			ISPHIST_CNT_SHIFT_SHIFT;
		break;
	case OMAP3ISP_HIST_BINS_64:
		cnt |= (ISPHIST_IN_BIT_WIDTH_CCDC - 6) <<
			ISPHIST_CNT_SHIFT_SHIFT;
		break;
	default: /* OMAP3ISP_HIST_BINS_32 */
		cnt |= (ISPHIST_IN_BIT_WIDTH_CCDC - 5) <<
			ISPHIST_CNT_SHIFT_SHIFT;
		break;
	}

	hist_reset_mem(hist);

	isp_reg_writel(isp, cnt, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT);
	isp_reg_writel(isp, wb_gain, OMAP3_ISP_IOMEM_HIST, ISPHIST_WB_GAIN);
	isp_reg_writel(isp, reg_hor[0], OMAP3_ISP_IOMEM_HIST, ISPHIST_R0_HORZ);
	isp_reg_writel(isp, reg_ver[0], OMAP3_ISP_IOMEM_HIST, ISPHIST_R0_VERT);
	isp_reg_writel(isp, reg_hor[1], OMAP3_ISP_IOMEM_HIST, ISPHIST_R1_HORZ);
	isp_reg_writel(isp, reg_ver[1], OMAP3_ISP_IOMEM_HIST, ISPHIST_R1_VERT);
	isp_reg_writel(isp, reg_hor[2], OMAP3_ISP_IOMEM_HIST, ISPHIST_R2_HORZ);
	isp_reg_writel(isp, reg_ver[2], OMAP3_ISP_IOMEM_HIST, ISPHIST_R2_VERT);
	isp_reg_writel(isp, reg_hor[3], OMAP3_ISP_IOMEM_HIST, ISPHIST_R3_HORZ);
	isp_reg_writel(isp, reg_ver[3], OMAP3_ISP_IOMEM_HIST, ISPHIST_R3_VERT);

	hist->update = 0;
	hist->config_counter += hist->inc_config;
	hist->inc_config = 0;
	hist->buf_size = conf->buf_size;
}

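/*
 * hist_enable - Enable or disable the histogram engine and its ISP subclock.
 */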
static void hist_enable(struct ispstat *hist, int enable)
{
	if (enable) {
		isp_reg_set(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_PCR,
			    ISPHIST_PCR_ENABLE);
		omap3isp_subclk_enable(hist->isp, OMAP3_ISP_SUBCLK_HIST);
	} else {
		isp_reg_clr(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_PCR,
			    ISPHIST_PCR_ENABLE);
		omap3isp_subclk_disable(hist->isp, OMAP3_ISP_SUBCLK_HIST);
	}
}

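/*
 * hist_busy - Return non-zero if the histogram engine is busy.
 */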
static int hist_busy(struct ispstat *hist)
{
	return isp_reg_readl(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_PCR)
						& ISPHIST_PCR_BUSY;
}

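/*
 * hist_dma_cb - DMA engine completion callback for the histogram readout.
 */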
static void hist_dma_cb(void *data)
{
	struct ispstat *hist = data;

	/* FIXME: The DMA engine API can't report transfer errors :-/ */

	isp_reg_clr(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT,
		    ISPHIST_CNT_CLEAR);

	omap3isp_stat_dma_isr(hist);
	if (hist->state != ISPSTAT_DISABLED)
		omap3isp_hist_dma_done(hist->isp);
}

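/*
 * hist_buf_dma - Read the histogram memory into the active buffer using the
 * DMA engine. Returns STAT_BUF_WAITING_DMA on success or STAT_NO_BUF on error.
 */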
static int hist_buf_dma(struct ispstat *hist)
{
	dma_addr_t dma_addr = hist->active_buf->dma_addr;
	struct dma_async_tx_descriptor *tx;
	struct dma_slave_config cfg;
	dma_cookie_t cookie;
	int ret;

	if (unlikely(!dma_addr)) {
		dev_dbg(hist->isp->dev, "hist: invalid DMA buffer address\n");
		goto error;
	}

	isp_reg_writel(hist->isp, 0, OMAP3_ISP_IOMEM_HIST, ISPHIST_ADDR);
	isp_reg_set(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT,
		    ISPHIST_CNT_CLEAR);
	omap3isp_flush(hist->isp);

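	/*
	 * Configure the channel to read 32-bit words from the histogram data
	 * register, with a single burst covering the whole buffer. The CLEAR
	 * bit set above makes the memory self-clear while it is read out; the
	 * bit is cleared again in hist_dma_cb().
	 */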
	memset(&cfg, 0, sizeof(cfg));
	cfg.src_addr = hist->isp->mmio_hist_base_phys + ISPHIST_DATA;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = hist->buf_size / 4;

	ret = dmaengine_slave_config(hist->dma_ch, &cfg);
	if (ret < 0) {
		dev_dbg(hist->isp->dev,
			"hist: DMA slave configuration failed\n");
		goto error;
	}

	tx = dmaengine_prep_slave_single(hist->dma_ch, dma_addr,
					 hist->buf_size, DMA_DEV_TO_MEM,
					 DMA_CTRL_ACK);
	if (tx == NULL) {
		dev_dbg(hist->isp->dev,
			"hist: DMA slave preparation failed\n");
		goto error;
	}

	tx->callback = hist_dma_cb;
	tx->callback_param = hist;
	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_dbg(hist->isp->dev, "hist: DMA submission failed\n");
		goto error;
	}

	dma_async_issue_pending(hist->dma_ch);

	return STAT_BUF_WAITING_DMA;

error:
	hist_reset_mem(hist);
	return STAT_NO_BUF;
}

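/*
 * hist_buf_pio - Read the histogram memory into the active buffer with PIO.
 * Returns STAT_BUF_DONE on success or STAT_NO_BUF on error.
 */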
static int hist_buf_pio(struct ispstat *hist)
{
	struct isp_device *isp = hist->isp;
	u32 *buf = hist->active_buf->virt_addr;
	unsigned int i;

	if (!buf) {
		dev_dbg(isp->dev, "hist: invalid PIO buffer address\n");
		hist_reset_mem(hist);
		return STAT_NO_BUF;
	}

	isp_reg_writel(isp, 0, OMAP3_ISP_IOMEM_HIST, ISPHIST_ADDR);

	/*
	 * While this bit is set, the histogram internal buffer is cleared at
	 * the same time it is read. The bit must be cleared just after all
	 * data has been acquired.
	 */
	isp_reg_set(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT, ISPHIST_CNT_CLEAR);

	/*
	 * Read four 4-byte words per iteration as an optimization: it avoids
	 * 3/4 of the jumps. buf_size is known to be divisible by 16.
	 */
	for (i = hist->buf_size / 16; i > 0; i--) {
		*buf++ = isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
		*buf++ = isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
		*buf++ = isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
		*buf++ = isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
	}
	isp_reg_clr(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT,
		    ISPHIST_CNT_CLEAR);

	return STAT_BUF_DONE;
}

/*
 * hist_buf_process - Callback from ISP driver for HIST interrupt.
 */
static int hist_buf_process(struct ispstat *hist)
{
	struct omap3isp_hist_config *user_cfg = hist->priv;
	int ret;

	if (atomic_read(&hist->buf_err) || hist->state != ISPSTAT_ENABLED) {
		hist_reset_mem(hist);
		return STAT_NO_BUF;
	}

	if (--(hist->wait_acc_frames))
		return STAT_NO_BUF;

	if (hist->dma_ch)
		ret = hist_buf_dma(hist);
	else
		ret = hist_buf_pio(hist);

	hist->wait_acc_frames = user_cfg->num_acc_frames;

	return ret;
}

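/*
 * hist_get_buf_size - Compute the buffer size required by a configuration.
 */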
static u32 hist_get_buf_size(struct omap3isp_hist_config *conf)
{
	return OMAP3ISP_HIST_MEM_SIZE_BINS(conf->hist_bins) * conf->num_regions;
}

/*
 * hist_validate_params - Helper function to check user-given parameters.
 * @new_conf: Pointer to user configuration structure.
 *
 * Return 0 if the configuration is valid, -EINVAL otherwise.
 */
static int hist_validate_params(struct ispstat *hist, void *new_conf)
{
	struct omap3isp_hist_config *user_cfg = new_conf;
	int c;
	u32 buf_size;

	if (user_cfg->cfa > OMAP3ISP_HIST_CFA_FOVEONX3)
		return -EINVAL;

	/* Region sizes and positions */

	if ((user_cfg->num_regions < OMAP3ISP_HIST_MIN_REGIONS) ||
	    (user_cfg->num_regions > OMAP3ISP_HIST_MAX_REGIONS))
		return -EINVAL;

	/* Regions */
	for (c = 0; c < user_cfg->num_regions; c++) {
		if (user_cfg->region[c].h_start & ~ISPHIST_REG_START_END_MASK)
			return -EINVAL;
		if (user_cfg->region[c].h_end & ~ISPHIST_REG_START_END_MASK)
			return -EINVAL;
		if (user_cfg->region[c].v_start & ~ISPHIST_REG_START_END_MASK)
			return -EINVAL;
		if (user_cfg->region[c].v_end & ~ISPHIST_REG_START_END_MASK)
			return -EINVAL;
		if (user_cfg->region[c].h_start > user_cfg->region[c].h_end)
			return -EINVAL;
		if (user_cfg->region[c].v_start > user_cfg->region[c].v_end)
			return -EINVAL;
	}

	switch (user_cfg->num_regions) {
	case 1:
		if (user_cfg->hist_bins > OMAP3ISP_HIST_BINS_256)
			return -EINVAL;
		break;
	case 2:
		if (user_cfg->hist_bins > OMAP3ISP_HIST_BINS_128)
			return -EINVAL;
		break;
	default: /* 3 or 4 */
		if (user_cfg->hist_bins > OMAP3ISP_HIST_BINS_64)
			return -EINVAL;
		break;
	}

	buf_size = hist_get_buf_size(user_cfg);
	if (buf_size > user_cfg->buf_size)
		/* User's buf_size request wasn't enough */
		user_cfg->buf_size = buf_size;
	else if (user_cfg->buf_size > OMAP3ISP_HIST_MAX_BUF_SIZE)
		user_cfg->buf_size = OMAP3ISP_HIST_MAX_BUF_SIZE;

	return 0;
}

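/*
 * hist_comp_params - Compare a candidate configuration with the current one.
 * Returns 1 if the two differ in any field that requires reprogramming the
 * module, 0 otherwise.
 */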
static int hist_comp_params(struct ispstat *hist,
			    struct omap3isp_hist_config *user_cfg)
{
	struct omap3isp_hist_config *cur_cfg = hist->priv;
	int c;

	if (cur_cfg->cfa != user_cfg->cfa)
		return 1;

	if (cur_cfg->num_acc_frames != user_cfg->num_acc_frames)
		return 1;

	if (cur_cfg->hist_bins != user_cfg->hist_bins)
		return 1;

	for (c = 0; c < OMAP3ISP_HIST_MAX_WG; c++) {
		if (c == 3 && user_cfg->cfa == OMAP3ISP_HIST_CFA_FOVEONX3)
			break;
		else if (cur_cfg->wg[c] != user_cfg->wg[c])
			return 1;
	}

	if (cur_cfg->num_regions != user_cfg->num_regions)
		return 1;

	/* Regions */
	for (c = 0; c < user_cfg->num_regions; c++) {
		if (cur_cfg->region[c].h_start != user_cfg->region[c].h_start)
			return 1;
		if (cur_cfg->region[c].h_end != user_cfg->region[c].h_end)
			return 1;
		if (cur_cfg->region[c].v_start != user_cfg->region[c].v_start)
			return 1;
		if (cur_cfg->region[c].v_end != user_cfg->region[c].v_end)
			return 1;
	}

	return 0;
}

/*
 * hist_set_params - Helper function to check and store user-given parameters.
 * @new_conf: Pointer to user configuration structure.
 */
static void hist_set_params(struct ispstat *hist, void *new_conf)
{
	struct omap3isp_hist_config *user_cfg = new_conf;
	struct omap3isp_hist_config *cur_cfg = hist->priv;

	if (!hist->configured || hist_comp_params(hist, user_cfg)) {
		memcpy(cur_cfg, user_cfg, sizeof(*user_cfg));
		if (user_cfg->num_acc_frames == 0)
			user_cfg->num_acc_frames = 1;
		hist->inc_config++;
		hist->update = 1;
		/*
		 * The user might have asked for a bigger buffer than needed
		 * for this configuration. In order to return the right amount
		 * of data during buffer requests, compute the size here
		 * instead of sticking with user_cfg->buf_size.
		 */
		cur_cfg->buf_size = hist_get_buf_size(cur_cfg);
	}
}

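/*
 * hist_ioctl - Handle the histogram module's private V4L2 ioctls.
 */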
static long hist_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
{
	struct ispstat *stat = v4l2_get_subdevdata(sd);

	switch (cmd) {
	case VIDIOC_OMAP3ISP_HIST_CFG:
		return omap3isp_stat_config(stat, arg);
	case VIDIOC_OMAP3ISP_STAT_REQ:
		return omap3isp_stat_request_statistics(stat, arg);
	case VIDIOC_OMAP3ISP_STAT_REQ_TIME32:
		return omap3isp_stat_request_statistics_time32(stat, arg);
	case VIDIOC_OMAP3ISP_STAT_EN: {
		int *en = arg;
		return omap3isp_stat_enable(stat, !!*en);
	}
	}

	return -ENOIOCTLCMD;
}

static const struct ispstat_ops hist_ops = {
	.validate_params	= hist_validate_params,
	.set_params		= hist_set_params,
	.setup_regs		= hist_setup_regs,
	.enable			= hist_enable,
	.busy			= hist_busy,
	.buf_process		= hist_buf_process,
};

static const struct v4l2_subdev_core_ops hist_subdev_core_ops = {
	.ioctl = hist_ioctl,
	.subscribe_event = omap3isp_stat_subscribe_event,
	.unsubscribe_event = omap3isp_stat_unsubscribe_event,
};

static const struct v4l2_subdev_video_ops hist_subdev_video_ops = {
	.s_stream = omap3isp_stat_s_stream,
};

static const struct v4l2_subdev_ops hist_subdev_ops = {
	.core = &hist_subdev_core_ops,
	.video = &hist_subdev_video_ops,
};

/*
 * omap3isp_hist_init - Module Initialization.
 */
int omap3isp_hist_init(struct isp_device *isp)
{
	struct ispstat *hist = &isp->isp_hist;
	struct omap3isp_hist_config *hist_cfg;
	int ret;

	hist_cfg = kzalloc(sizeof(*hist_cfg), GFP_KERNEL);
	if (hist_cfg == NULL)
		return -ENOMEM;

	hist->isp = isp;

	if (HIST_CONFIG_DMA) {
		dma_cap_mask_t mask;

		/*
		 * We need a slave-capable channel without a DMA request line
		 * for reading out the data. dma_request_chan_by_mask() fits
		 * this purpose, as any channel capable of slave configuration
		 * will do.
		 */
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		hist->dma_ch = dma_request_chan_by_mask(&mask);
		if (IS_ERR(hist->dma_ch)) {
			ret = PTR_ERR(hist->dma_ch);
			if (ret == -EPROBE_DEFER)
				goto err;

			hist->dma_ch = NULL;
			dev_warn(isp->dev,
				 "hist: DMA channel request failed, using PIO\n");
		} else {
			dev_dbg(isp->dev, "hist: using DMA channel %s\n",
				dma_chan_name(hist->dma_ch));
		}
	}

	hist->ops = &hist_ops;
	hist->priv = hist_cfg;
	hist->event_type = V4L2_EVENT_OMAP3ISP_HIST;

	ret = omap3isp_stat_init(hist, "histogram", &hist_subdev_ops);

err:
	if (ret) {
		if (!IS_ERR_OR_NULL(hist->dma_ch))
			dma_release_channel(hist->dma_ch);
		kfree(hist_cfg);
	}

	return ret;
}

/*
 * omap3isp_hist_cleanup - Module cleanup.
 */
void omap3isp_hist_cleanup(struct isp_device *isp)
{
	struct ispstat *hist = &isp->isp_hist;

	if (hist->dma_ch)
		dma_release_channel(hist->dma_ch);

	omap3isp_stat_cleanup(hist);
}
