1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * camss-vfe-170.c
4  *
5  * Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module v170
6  *
7  * Copyright (C) 2020-2021 Linaro Ltd.
8  */
9 
10 #include <linux/delay.h>
11 #include <linux/interrupt.h>
12 #include <linux/io.h>
13 #include <linux/iopoll.h>
14 
15 #include "camss.h"
16 #include "camss-vfe.h"
17 
18 #define VFE_HW_VERSION				(0x000)
19 
20 #define VFE_GLOBAL_RESET_CMD			(0x018)
21 #define		GLOBAL_RESET_CMD_CORE		BIT(0)
22 #define		GLOBAL_RESET_CMD_CAMIF		BIT(1)
23 #define		GLOBAL_RESET_CMD_BUS		BIT(2)
24 #define		GLOBAL_RESET_CMD_BUS_BDG	BIT(3)
25 #define		GLOBAL_RESET_CMD_REGISTER	BIT(4)
26 #define		GLOBAL_RESET_CMD_PM		BIT(5)
27 #define		GLOBAL_RESET_CMD_BUS_MISR	BIT(6)
28 #define		GLOBAL_RESET_CMD_TESTGEN	BIT(7)
29 #define		GLOBAL_RESET_CMD_DSP		BIT(8)
30 #define		GLOBAL_RESET_CMD_IDLE_CGC	BIT(9)
31 #define		GLOBAL_RESET_CMD_RDI0		BIT(10)
32 #define		GLOBAL_RESET_CMD_RDI1		BIT(11)
33 #define		GLOBAL_RESET_CMD_RDI2		BIT(12)
34 #define		GLOBAL_RESET_CMD_RDI3		BIT(13)
35 #define		GLOBAL_RESET_CMD_VFE_DOMAIN	BIT(30)
36 #define		GLOBAL_RESET_CMD_RESET_BYPASS	BIT(31)
37 
38 #define VFE_CORE_CFG				(0x050)
39 #define		CFG_PIXEL_PATTERN_YCBYCR	(0x4)
40 #define		CFG_PIXEL_PATTERN_YCRYCB	(0x5)
41 #define		CFG_PIXEL_PATTERN_CBYCRY	(0x6)
42 #define		CFG_PIXEL_PATTERN_CRYCBY	(0x7)
43 #define		CFG_COMPOSITE_REG_UPDATE_EN	BIT(4)
44 
45 #define VFE_IRQ_CMD				(0x058)
46 #define		CMD_GLOBAL_CLEAR		BIT(0)
47 
48 #define VFE_IRQ_MASK_0					(0x05c)
49 #define		MASK_0_CAMIF_SOF			BIT(0)
50 #define		MASK_0_CAMIF_EOF			BIT(1)
51 #define		MASK_0_RDI_REG_UPDATE(n)		BIT((n) + 5)
52 #define		MASK_0_IMAGE_MASTER_n_PING_PONG(n)	BIT((n) + 8)
53 #define		MASK_0_IMAGE_COMPOSITE_DONE_n(n)	BIT((n) + 25)
54 #define		MASK_0_RESET_ACK			BIT(31)
55 
56 #define VFE_IRQ_MASK_1					(0x060)
57 #define		MASK_1_CAMIF_ERROR			BIT(0)
58 #define		MASK_1_VIOLATION			BIT(7)
59 #define		MASK_1_BUS_BDG_HALT_ACK			BIT(8)
60 #define		MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(n)	BIT((n) + 9)
61 #define		MASK_1_RDI_SOF(n)			BIT((n) + 29)
62 
63 #define VFE_IRQ_CLEAR_0					(0x064)
64 #define VFE_IRQ_CLEAR_1					(0x068)
65 
66 #define VFE_IRQ_STATUS_0				(0x06c)
67 #define		STATUS_0_CAMIF_SOF			BIT(0)
68 #define		STATUS_0_RDI_REG_UPDATE(n)		BIT((n) + 5)
69 #define		STATUS_0_IMAGE_MASTER_PING_PONG(n)	BIT((n) + 8)
70 #define		STATUS_0_IMAGE_COMPOSITE_DONE(n)	BIT((n) + 25)
71 #define		STATUS_0_RESET_ACK			BIT(31)
72 
73 #define VFE_IRQ_STATUS_1				(0x070)
74 #define		STATUS_1_VIOLATION			BIT(7)
75 #define		STATUS_1_BUS_BDG_HALT_ACK		BIT(8)
76 #define		STATUS_1_RDI_SOF(n)			BIT((n) + 27)
77 
78 #define VFE_VIOLATION_STATUS			(0x07c)
79 
80 #define VFE_CAMIF_CMD				(0x478)
81 #define		CMD_CLEAR_CAMIF_STATUS		BIT(2)
82 
83 #define VFE_CAMIF_CFG				(0x47c)
84 #define		CFG_VSYNC_SYNC_EDGE		(0)
85 #define			VSYNC_ACTIVE_HIGH	(0)
86 #define			VSYNC_ACTIVE_LOW	(1)
87 #define		CFG_HSYNC_SYNC_EDGE		(1)
88 #define			HSYNC_ACTIVE_HIGH	(0)
89 #define			HSYNC_ACTIVE_LOW	(1)
90 #define		CFG_VFE_SUBSAMPLE_ENABLE	BIT(4)
91 #define		CFG_BUS_SUBSAMPLE_ENABLE	BIT(5)
92 #define		CFG_VFE_OUTPUT_EN		BIT(6)
93 #define		CFG_BUS_OUTPUT_EN		BIT(7)
94 #define		CFG_BINNING_EN			BIT(9)
95 #define		CFG_FRAME_BASED_EN		BIT(10)
96 #define		CFG_RAW_CROP_EN			BIT(22)
97 
98 #define VFE_REG_UPDATE_CMD			(0x4ac)
99 #define		REG_UPDATE_RDI(n)		BIT(1 + (n))
100 
101 #define VFE_BUS_IRQ_MASK(n)		(0x2044 + (n) * 4)
102 #define VFE_BUS_IRQ_CLEAR(n)		(0x2050 + (n) * 4)
103 #define VFE_BUS_IRQ_STATUS(n)		(0x205c + (n) * 4)
104 #define		STATUS0_COMP_RESET_DONE		BIT(0)
105 #define		STATUS0_COMP_REG_UPDATE0_DONE	BIT(1)
106 #define		STATUS0_COMP_REG_UPDATE1_DONE	BIT(2)
107 #define		STATUS0_COMP_REG_UPDATE2_DONE	BIT(3)
108 #define		STATUS0_COMP_REG_UPDATE3_DONE	BIT(4)
109 #define		STATUS0_COMP_REG_UPDATE_DONE(n)	BIT((n) + 1)
110 #define		STATUS0_COMP0_BUF_DONE		BIT(5)
111 #define		STATUS0_COMP1_BUF_DONE		BIT(6)
112 #define		STATUS0_COMP2_BUF_DONE		BIT(7)
113 #define		STATUS0_COMP3_BUF_DONE		BIT(8)
114 #define		STATUS0_COMP4_BUF_DONE		BIT(9)
115 #define		STATUS0_COMP5_BUF_DONE		BIT(10)
116 #define		STATUS0_COMP_BUF_DONE(n)	BIT((n) + 5)
117 #define		STATUS0_COMP_ERROR		BIT(11)
118 #define		STATUS0_COMP_OVERWRITE		BIT(12)
119 #define		STATUS0_OVERFLOW		BIT(13)
120 #define		STATUS0_VIOLATION		BIT(14)
121 /* WM_CLIENT_BUF_DONE defined for buffers 0:19 */
122 #define		STATUS1_WM_CLIENT_BUF_DONE(n)		BIT(n)
123 #define		STATUS1_EARLY_DONE			BIT(24)
124 #define		STATUS2_DUAL_COMP0_BUF_DONE		BIT(0)
125 #define		STATUS2_DUAL_COMP1_BUF_DONE		BIT(1)
126 #define		STATUS2_DUAL_COMP2_BUF_DONE		BIT(2)
127 #define		STATUS2_DUAL_COMP3_BUF_DONE		BIT(3)
128 #define		STATUS2_DUAL_COMP4_BUF_DONE		BIT(4)
129 #define		STATUS2_DUAL_COMP5_BUF_DONE		BIT(5)
130 #define		STATUS2_DUAL_COMP_BUF_DONE(n)		BIT(n)
131 #define		STATUS2_DUAL_COMP_ERROR			BIT(6)
132 #define		STATUS2_DUAL_COMP_OVERWRITE		BIT(7)
133 
134 #define VFE_BUS_IRQ_CLEAR_GLOBAL		(0x2068)
135 
136 #define VFE_BUS_WM_DEBUG_STATUS_CFG		(0x226c)
137 #define		DEBUG_STATUS_CFG_STATUS0(n)	BIT(n)
138 #define		DEBUG_STATUS_CFG_STATUS1(n)	BIT(8 + (n))
139 
140 #define VFE_BUS_WM_ADDR_SYNC_FRAME_HEADER	(0x2080)
141 
142 #define VFE_BUS_WM_ADDR_SYNC_NO_SYNC		(0x2084)
143 #define		BUS_VER2_MAX_CLIENTS (24)
144 #define		WM_ADDR_NO_SYNC_DEFAULT_VAL \
145 				((1 << BUS_VER2_MAX_CLIENTS) - 1)
146 
147 #define VFE_BUS_WM_CGC_OVERRIDE			(0x200c)
148 #define		WM_CGC_OVERRIDE_ALL		(0xFFFFF)
149 
150 #define VFE_BUS_WM_TEST_BUS_CTRL		(0x211c)
151 
152 #define VFE_BUS_WM_STATUS0(n)			(0x2200 + (n) * 0x100)
153 #define VFE_BUS_WM_STATUS1(n)			(0x2204 + (n) * 0x100)
154 #define VFE_BUS_WM_CFG(n)			(0x2208 + (n) * 0x100)
155 #define		WM_CFG_EN			(0)
156 #define		WM_CFG_MODE			(1)
157 #define			MODE_QCOM_PLAIN	(0)
158 #define			MODE_MIPI_RAW	(1)
159 #define		WM_CFG_VIRTUALFRAME		(2)
160 #define VFE_BUS_WM_HEADER_ADDR(n)		(0x220c + (n) * 0x100)
161 #define VFE_BUS_WM_HEADER_CFG(n)		(0x2210 + (n) * 0x100)
162 #define VFE_BUS_WM_IMAGE_ADDR(n)		(0x2214 + (n) * 0x100)
163 #define VFE_BUS_WM_IMAGE_ADDR_OFFSET(n)		(0x2218 + (n) * 0x100)
164 #define VFE_BUS_WM_BUFFER_WIDTH_CFG(n)		(0x221c + (n) * 0x100)
165 #define		WM_BUFFER_DEFAULT_WIDTH		(0xFF01)
166 
167 #define VFE_BUS_WM_BUFFER_HEIGHT_CFG(n)		(0x2220 + (n) * 0x100)
168 #define VFE_BUS_WM_PACKER_CFG(n)		(0x2224 + (n) * 0x100)
169 
170 #define VFE_BUS_WM_STRIDE(n)			(0x2228 + (n) * 0x100)
171 #define		WM_STRIDE_DEFAULT_STRIDE	(0xFF01)
172 
173 #define VFE_BUS_WM_IRQ_SUBSAMPLE_PERIOD(n)	(0x2248 + (n) * 0x100)
174 #define VFE_BUS_WM_IRQ_SUBSAMPLE_PATTERN(n)	(0x224c + (n) * 0x100)
175 #define VFE_BUS_WM_FRAMEDROP_PERIOD(n)		(0x2250 + (n) * 0x100)
176 #define VFE_BUS_WM_FRAMEDROP_PATTERN(n)		(0x2254 + (n) * 0x100)
177 #define VFE_BUS_WM_FRAME_INC(n)			(0x2258 + (n) * 0x100)
178 #define VFE_BUS_WM_BURST_LIMIT(n)		(0x225c + (n) * 0x100)
179 
vfe_hw_version(struct vfe_device * vfe)180 static u32 vfe_hw_version(struct vfe_device *vfe)
181 {
182 	u32 hw_version = readl_relaxed(vfe->base + VFE_HW_VERSION);
183 
184 	u32 gen = (hw_version >> 28) & 0xF;
185 	u32 rev = (hw_version >> 16) & 0xFFF;
186 	u32 step = hw_version & 0xFFFF;
187 
188 	dev_dbg(vfe->camss->dev, "VFE HW Version = %u.%u.%u\n",
189 		gen, rev, step);
190 
191 	return hw_version;
192 }
193 
/* Read-modify-write helper: clear @clr_bits in register @reg. */
static inline void vfe_reg_clr(struct vfe_device *vfe, u32 reg, u32 clr_bits)
{
	void __iomem *addr = vfe->base + reg;

	writel_relaxed(readl_relaxed(addr) & ~clr_bits, addr);
}
200 
/* Read-modify-write helper: set @set_bits in register @reg. */
static inline void vfe_reg_set(struct vfe_device *vfe, u32 reg, u32 set_bits)
{
	void __iomem *addr = vfe->base + reg;

	writel_relaxed(readl_relaxed(addr) | set_bits, addr);
}
207 
vfe_global_reset(struct vfe_device * vfe)208 static void vfe_global_reset(struct vfe_device *vfe)
209 {
210 	u32 reset_bits = GLOBAL_RESET_CMD_CORE		|
211 			 GLOBAL_RESET_CMD_CAMIF		|
212 			 GLOBAL_RESET_CMD_BUS		|
213 			 GLOBAL_RESET_CMD_BUS_BDG	|
214 			 GLOBAL_RESET_CMD_REGISTER	|
215 			 GLOBAL_RESET_CMD_TESTGEN	|
216 			 GLOBAL_RESET_CMD_DSP		|
217 			 GLOBAL_RESET_CMD_IDLE_CGC	|
218 			 GLOBAL_RESET_CMD_RDI0		|
219 			 GLOBAL_RESET_CMD_RDI1		|
220 			 GLOBAL_RESET_CMD_RDI2;
221 
222 	writel_relaxed(BIT(31), vfe->base + VFE_IRQ_MASK_0);
223 
224 	/* Make sure IRQ mask has been written before resetting */
225 	wmb();
226 
227 	writel_relaxed(reset_bits, vfe->base + VFE_GLOBAL_RESET_CMD);
228 }
229 
/*
 * vfe_wm_start - Configure and enable one bus write master (WM)
 * @vfe: VFE device
 * @wm: write master index to start
 * @line: VFE line the WM serves (not referenced in this function)
 *
 * Programs the WM with RDI capture defaults (MIPI RAW mode) and finally
 * enables it via VFE_BUS_WM_CFG. Called with vfe->output_lock held by
 * vfe_enable_output().
 */
static void vfe_wm_start(struct vfe_device *vfe, u8 wm, struct vfe_line *line)
{
	u32 val;

	/* Set debug registers */
	val = DEBUG_STATUS_CFG_STATUS0(1) |
	      DEBUG_STATUS_CFG_STATUS0(7);
	writel_relaxed(val, vfe->base + VFE_BUS_WM_DEBUG_STATUS_CFG);

	/* BUS_WM_INPUT_IF_ADDR_SYNC_FRAME_HEADER */
	writel_relaxed(0, vfe->base + VFE_BUS_WM_ADDR_SYNC_FRAME_HEADER);

	/* no clock gating at bus input */
	val = WM_CGC_OVERRIDE_ALL;
	writel_relaxed(val, vfe->base + VFE_BUS_WM_CGC_OVERRIDE);

	writel_relaxed(0x0, vfe->base + VFE_BUS_WM_TEST_BUS_CTRL);

	/* if addr_no_sync has default value then config the addr no sync reg */
	val = WM_ADDR_NO_SYNC_DEFAULT_VAL;
	writel_relaxed(val, vfe->base + VFE_BUS_WM_ADDR_SYNC_NO_SYNC);

	/* burst limit for this WM's bus transactions */
	writel_relaxed(0xf, vfe->base + VFE_BUS_WM_BURST_LIMIT(wm));

	val = WM_BUFFER_DEFAULT_WIDTH;
	writel_relaxed(val, vfe->base + VFE_BUS_WM_BUFFER_WIDTH_CFG(wm));

	val = 0;
	writel_relaxed(val, vfe->base + VFE_BUS_WM_BUFFER_HEIGHT_CFG(wm));

	val = 0;
	writel_relaxed(val, vfe->base + VFE_BUS_WM_PACKER_CFG(wm)); // XXX 1 for PLAIN8?

	/* Configure stride for RDIs */
	val = WM_STRIDE_DEFAULT_STRIDE;
	writel_relaxed(val, vfe->base + VFE_BUS_WM_STRIDE(wm));

	/* Enable WM */
	val = 1 << WM_CFG_EN |
	      MODE_MIPI_RAW << WM_CFG_MODE;
	writel_relaxed(val, vfe->base + VFE_BUS_WM_CFG(wm));
}
272 
/*
 * vfe_wm_stop - Disable one bus write master
 * @vfe: VFE device
 * @wm: write master index to stop
 */
static void vfe_wm_stop(struct vfe_device *vfe, u8 wm)
{
	/* Clearing WM_CFG_EN (bit 0) along with the rest disables the WM */
	writel_relaxed(0, vfe->base + VFE_BUS_WM_CFG(wm));
}
278 
/*
 * vfe_wm_update - Point a write master at a new capture buffer
 * @vfe: VFE device
 * @wm: write master index
 * @addr: DMA address of the buffer
 * @line: VFE line providing the active output format
 */
static void vfe_wm_update(struct vfe_device *vfe, u8 wm, u32 addr,
			  struct vfe_line *line)
{
	const struct v4l2_pix_format_mplane *pix =
		&line->video_out.active_fmt.fmt.pix_mp;
	u32 frame_size = pix->plane_fmt[0].bytesperline * pix->height;

	writel_relaxed(addr, vfe->base + VFE_BUS_WM_IMAGE_ADDR(wm));
	writel_relaxed(frame_size, vfe->base + VFE_BUS_WM_FRAME_INC(wm));
}
289 
/*
 * vfe_reg_update - Issue a register update command for a line's RDI
 * @vfe: VFE device
 * @line_id: VFE line whose RDI update is requested
 *
 * The per-RDI bit is accumulated in the cached vfe->reg_update so that
 * still-pending updates for other lines are re-issued together with this
 * one; bits are removed again by vfe_reg_update_clear().
 */
static void vfe_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
{
	vfe->reg_update |= REG_UPDATE_RDI(line_id);

	/* Enforce ordering between previous reg writes and reg update */
	wmb();

	writel_relaxed(vfe->reg_update, vfe->base + VFE_REG_UPDATE_CMD);

	/* Enforce ordering between reg update and subsequent reg writes */
	wmb();
}
302 
/*
 * vfe_reg_update_clear - Drop a line's RDI bit from the cached update mask
 * @vfe: VFE device
 * @line_id: VFE line whose RDI update bit is cleared
 *
 * Only updates the software cache; the hardware register is rewritten on
 * the next vfe_reg_update() call.
 */
static inline void vfe_reg_update_clear(struct vfe_device *vfe,
					enum vfe_line_id line_id)
{
	vfe->reg_update &= ~REG_UPDATE_RDI(line_id);
}
308 
vfe_enable_irq_common(struct vfe_device * vfe)309 static void vfe_enable_irq_common(struct vfe_device *vfe)
310 {
311 	vfe_reg_set(vfe, VFE_IRQ_MASK_0, ~0u);
312 	vfe_reg_set(vfe, VFE_IRQ_MASK_1, ~0u);
313 
314 	writel_relaxed(~0u, vfe->base + VFE_BUS_IRQ_MASK(0));
315 	writel_relaxed(~0u, vfe->base + VFE_BUS_IRQ_MASK(1));
316 	writel_relaxed(~0u, vfe->base + VFE_BUS_IRQ_MASK(2));
317 }
318 
/*
 * vfe_isr_halt_ack - Handle a bus bridge halt ack interrupt
 * @vfe: VFE device
 *
 * Wakes any waiter blocked in vfe_halt().
 */
static void vfe_isr_halt_ack(struct vfe_device *vfe)
{
	complete(&vfe->halt_complete);
}
323 
/*
 * vfe_isr_read - Read and acknowledge the top-level VFE interrupt status
 * @vfe: VFE device
 * @status0: output, value read from VFE_IRQ_STATUS_0
 * @status1: output, value read from VFE_IRQ_STATUS_1
 *
 * The read status bits are written back to the clear registers and then
 * latched by the global clear command.
 */
static void vfe_isr_read(struct vfe_device *vfe, u32 *status0, u32 *status1)
{
	*status0 = readl_relaxed(vfe->base + VFE_IRQ_STATUS_0);
	*status1 = readl_relaxed(vfe->base + VFE_IRQ_STATUS_1);

	writel_relaxed(*status0, vfe->base + VFE_IRQ_CLEAR_0);
	writel_relaxed(*status1, vfe->base + VFE_IRQ_CLEAR_1);

	/* Enforce ordering between IRQ Clear and Global IRQ Clear */
	wmb();
	writel_relaxed(CMD_GLOBAL_CLEAR, vfe->base + VFE_IRQ_CMD);
}
336 
vfe_violation_read(struct vfe_device * vfe)337 static void vfe_violation_read(struct vfe_device *vfe)
338 {
339 	u32 violation = readl_relaxed(vfe->base + VFE_VIOLATION_STATUS);
340 
341 	pr_err_ratelimited("VFE: violation = 0x%08x\n", violation);
342 }
343 
344 /*
345  * vfe_isr - VFE module interrupt handler
346  * @irq: Interrupt line
347  * @dev: VFE device
348  *
349  * Return IRQ_HANDLED on success
350  */
static irqreturn_t vfe_isr(int irq, void *dev)
{
	struct vfe_device *vfe = dev;
	u32 status0, status1, vfe_bus_status[3];
	int i, wm;

	/* Read then acknowledge the top-level IRQ status registers */
	status0 = readl_relaxed(vfe->base + VFE_IRQ_STATUS_0);
	status1 = readl_relaxed(vfe->base + VFE_IRQ_STATUS_1);

	writel_relaxed(status0, vfe->base + VFE_IRQ_CLEAR_0);
	writel_relaxed(status1, vfe->base + VFE_IRQ_CLEAR_1);

	/* Same for the per-RDI bus IRQ status banks */
	for (i = VFE_LINE_RDI0; i <= VFE_LINE_RDI2; i++) {
		vfe_bus_status[i] = readl_relaxed(vfe->base + VFE_BUS_IRQ_STATUS(i));
		writel_relaxed(vfe_bus_status[i], vfe->base + VFE_BUS_IRQ_CLEAR(i));
	}

	/* Enforce ordering between IRQ reading and interpretation */
	wmb();

	/* Latch the pending clears into effect */
	writel_relaxed(CMD_GLOBAL_CLEAR, vfe->base + VFE_IRQ_CMD);
	writel_relaxed(1, vfe->base + VFE_BUS_IRQ_CLEAR_GLOBAL);

	if (status0 & STATUS_0_RESET_ACK)
		vfe->isr_ops.reset_ack(vfe);

	for (i = VFE_LINE_RDI0; i <= VFE_LINE_RDI2; i++)
		if (status0 & STATUS_0_RDI_REG_UPDATE(i))
			vfe->isr_ops.reg_update(vfe, i);

	/*
	 * NOTE(review): SOF bits belong to the STATUS_1 register per the
	 * macro names, yet they are tested against status0 here - confirm
	 * whether this should be status1.
	 */
	for (i = VFE_LINE_RDI0; i <= VFE_LINE_RDI2; i++)
		if (status0 & STATUS_1_RDI_SOF(i))
			vfe->isr_ops.sof(vfe, i);

	for (i = 0; i < MSM_VFE_COMPOSITE_IRQ_NUM; i++)
		if (vfe_bus_status[0] & STATUS0_COMP_BUF_DONE(i))
			vfe->isr_ops.comp_done(vfe, i);

	/*
	 * NOTE(review): BIT(9) is a magic status0 bit (image master 1
	 * ping-pong per the MASK_0 macros) tested identically for every
	 * wm iteration - verify the intended gating condition.
	 */
	for (wm = 0; wm < MSM_VFE_IMAGE_MASTERS_NUM; wm++)
		if (status0 & BIT(9))
			if (vfe_bus_status[1] & STATUS1_WM_CLIENT_BUF_DONE(wm))
				vfe->isr_ops.wm_done(vfe, wm);

	return IRQ_HANDLED;
}
396 
/*
 * vfe_halt - Wait for the VFE halt to complete
 * @vfe: VFE device
 *
 * Blocks until vfe_isr_halt_ack() completes &vfe->halt_complete or the
 * VFE_HALT_TIMEOUT_MS timeout expires.
 *
 * NOTE(review): no halt command is written to the hardware here before
 * waiting - the completion relies on a halt ack interrupt being raised
 * by some other path; confirm a halt request is issued elsewhere,
 * otherwise this always times out.
 *
 * Return 0 on success or a negative error code otherwise
 */
static int vfe_halt(struct vfe_device *vfe)
{
	unsigned long time;

	reinit_completion(&vfe->halt_complete);

	time = wait_for_completion_timeout(&vfe->halt_complete,
					   msecs_to_jiffies(VFE_HALT_TIMEOUT_MS));
	if (!time) {
		dev_err(vfe->camss->dev, "VFE halt timeout\n");
		return -EIO;
	}

	return 0;
}
418 
vfe_get_output(struct vfe_line * line)419 static int vfe_get_output(struct vfe_line *line)
420 {
421 	struct vfe_device *vfe = to_vfe(line);
422 	struct vfe_output *output;
423 	unsigned long flags;
424 	int wm_idx;
425 
426 	spin_lock_irqsave(&vfe->output_lock, flags);
427 
428 	output = &line->output;
429 	if (output->state != VFE_OUTPUT_OFF) {
430 		dev_err(vfe->camss->dev, "Output is running\n");
431 		goto error;
432 	}
433 
434 	output->wm_num = 1;
435 
436 	wm_idx = vfe_reserve_wm(vfe, line->id);
437 	if (wm_idx < 0) {
438 		dev_err(vfe->camss->dev, "Can not reserve wm\n");
439 		goto error_get_wm;
440 	}
441 	output->wm_idx[0] = wm_idx;
442 
443 	output->drop_update_idx = 0;
444 
445 	spin_unlock_irqrestore(&vfe->output_lock, flags);
446 
447 	return 0;
448 
449 error_get_wm:
450 	vfe_release_wm(vfe, output->wm_idx[0]);
451 	output->state = VFE_OUTPUT_OFF;
452 error:
453 	spin_unlock_irqrestore(&vfe->output_lock, flags);
454 
455 	return -EINVAL;
456 }
457 
/*
 * vfe_enable_output - Start the write master of a reserved output
 * @line: VFE line
 *
 * Transitions the output from OFF to ON, starts its WM, primes up to two
 * buffers into the ping/pong slots and requests a register update.
 *
 * Return 0 on success or a negative error code otherwise
 */
static int vfe_enable_output(struct vfe_line *line)
{
	struct vfe_device *vfe = to_vfe(line);
	struct vfe_output *output = &line->output;
	const struct vfe_hw_ops *ops = vfe->ops;
	struct media_entity *sensor;
	unsigned long flags;
	unsigned int frame_skip = 0;
	unsigned int i;

	/* Ask the sensor how many initial frames should be skipped */
	sensor = camss_find_sensor(&line->subdev.entity);
	if (sensor) {
		struct v4l2_subdev *subdev = media_entity_to_v4l2_subdev(sensor);

		v4l2_subdev_call(subdev, sensor, g_skip_frames, &frame_skip);
		/* Max frame skip is 29 frames */
		if (frame_skip > VFE_FRAME_DROP_VAL - 1)
			frame_skip = VFE_FRAME_DROP_VAL - 1;
	}
	/* NOTE(review): frame_skip is computed but not applied below - verify */

	spin_lock_irqsave(&vfe->output_lock, flags);

	ops->reg_update_clear(vfe, line->id);

	if (output->state != VFE_OUTPUT_OFF) {
		dev_err(vfe->camss->dev, "Output is not in reserved state %d\n",
			output->state);
		spin_unlock_irqrestore(&vfe->output_lock, flags);
		return -EINVAL;
	}

	WARN_ON(output->gen2.active_num);

	output->state = VFE_OUTPUT_ON;

	output->sequence = 0;
	output->wait_reg_update = 0;
	reinit_completion(&output->reg_update);

	vfe_wm_start(vfe, output->wm_idx[0], line);

	/* Prime the ping/pong slots with up to two pending buffers */
	for (i = 0; i < 2; i++) {
		output->buf[i] = vfe_buf_get_pending(output);
		if (!output->buf[i])
			break;
		output->gen2.active_num++;
		vfe_wm_update(vfe, output->wm_idx[0], output->buf[i]->addr[0], line);
	}

	ops->reg_update(vfe, line->id);

	spin_unlock_irqrestore(&vfe->output_lock, flags);

	return 0;
}
513 
vfe_disable_output(struct vfe_line * line)514 static int vfe_disable_output(struct vfe_line *line)
515 {
516 	struct vfe_device *vfe = to_vfe(line);
517 	struct vfe_output *output = &line->output;
518 	unsigned long flags;
519 	unsigned int i;
520 	bool done;
521 	int timeout = 0;
522 
523 	do {
524 		spin_lock_irqsave(&vfe->output_lock, flags);
525 		done = !output->gen2.active_num;
526 		spin_unlock_irqrestore(&vfe->output_lock, flags);
527 		usleep_range(10000, 20000);
528 
529 		if (timeout++ == 100) {
530 			dev_err(vfe->camss->dev, "VFE idle timeout - resetting\n");
531 			vfe_reset(vfe);
532 			output->gen2.active_num = 0;
533 			return 0;
534 		}
535 	} while (!done);
536 
537 	spin_lock_irqsave(&vfe->output_lock, flags);
538 	for (i = 0; i < output->wm_num; i++)
539 		vfe_wm_stop(vfe, output->wm_idx[i]);
540 	spin_unlock_irqrestore(&vfe->output_lock, flags);
541 
542 	return 0;
543 }
544 
/*
 * vfe_enable - Enable streaming on VFE line
 * @line: VFE line
 *
 * The first active stream also unmasks the common interrupts. On failure
 * the stream count is rolled back and any reserved output is released.
 *
 * Return 0 on success or a negative error code otherwise
 */
static int vfe_enable(struct vfe_line *line)
{
	struct vfe_device *vfe = to_vfe(line);
	int ret;

	mutex_lock(&vfe->stream_lock);

	/* Common IRQ setup is only needed for the first stream */
	if (!vfe->stream_count)
		vfe_enable_irq_common(vfe);

	vfe->stream_count++;

	mutex_unlock(&vfe->stream_lock);

	ret = vfe_get_output(line);
	if (ret < 0)
		goto error_get_output;

	ret = vfe_enable_output(line);
	if (ret < 0)
		goto error_enable_output;

	/* Record that this VFE has streamed at least once */
	vfe->was_streaming = 1;

	return 0;

error_enable_output:
	vfe_put_output(line);

error_get_output:
	mutex_lock(&vfe->stream_lock);

	vfe->stream_count--;

	mutex_unlock(&vfe->stream_lock);

	return ret;
}
589 
/*
 * vfe_disable - Disable streaming on VFE line
 * @line: VFE line
 *
 * Drains and stops the output, releases its write master and drops the
 * stream count.
 *
 * Return 0 on success or a negative error code otherwise
 */
static int vfe_disable(struct vfe_line *line)
{
	struct vfe_device *vfe = to_vfe(line);

	vfe_disable_output(line);

	vfe_put_output(line);

	mutex_lock(&vfe->stream_lock);

	vfe->stream_count--;

	mutex_unlock(&vfe->stream_lock);

	return 0;
}
612 
/*
 * vfe_isr_sof - Process start of frame interrupt
 * @vfe: VFE Device
 * @line_id: VFE line
 *
 * Intentionally empty: no per-SOF work is done on this hardware
 * generation.
 */
static void vfe_isr_sof(struct vfe_device *vfe, enum vfe_line_id line_id)
{
	/* nop */
}
622 
/*
 * vfe_isr_reg_update - Process reg update interrupt
 * @vfe: VFE Device
 * @line_id: VFE line
 *
 * Clears the line's cached reg update bit and wakes any waiter blocked
 * on the output's reg_update completion.
 */
static void vfe_isr_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
{
	struct vfe_output *output;
	unsigned long flags;

	spin_lock_irqsave(&vfe->output_lock, flags);
	vfe->ops->reg_update_clear(vfe, line_id);

	output = &vfe->line[line_id].output;

	if (output->wait_reg_update) {
		output->wait_reg_update = 0;
		complete(&output->reg_update);
	}

	spin_unlock_irqrestore(&vfe->output_lock, flags);
}
645 
646 /*
647  * vfe_isr_wm_done - Process write master done interrupt
648  * @vfe: VFE Device
649  * @wm: Write master id
650  */
vfe_isr_wm_done(struct vfe_device * vfe,u8 wm)651 static void vfe_isr_wm_done(struct vfe_device *vfe, u8 wm)
652 {
653 	struct vfe_line *line = &vfe->line[vfe->wm_output_map[wm]];
654 	struct camss_buffer *ready_buf;
655 	struct vfe_output *output;
656 	unsigned long flags;
657 	u32 index;
658 	u64 ts = ktime_get_ns();
659 
660 	spin_lock_irqsave(&vfe->output_lock, flags);
661 
662 	if (vfe->wm_output_map[wm] == VFE_LINE_NONE) {
663 		dev_err_ratelimited(vfe->camss->dev,
664 				    "Received wm done for unmapped index\n");
665 		goto out_unlock;
666 	}
667 	output = &vfe->line[vfe->wm_output_map[wm]].output;
668 
669 	ready_buf = output->buf[0];
670 	if (!ready_buf) {
671 		dev_err_ratelimited(vfe->camss->dev,
672 				    "Missing ready buf %d!\n", output->state);
673 		goto out_unlock;
674 	}
675 
676 	ready_buf->vb.vb2_buf.timestamp = ts;
677 	ready_buf->vb.sequence = output->sequence++;
678 
679 	index = 0;
680 	output->buf[0] = output->buf[1];
681 	if (output->buf[0])
682 		index = 1;
683 
684 	output->buf[index] = vfe_buf_get_pending(output);
685 
686 	if (output->buf[index])
687 		vfe_wm_update(vfe, output->wm_idx[0], output->buf[index]->addr[0], line);
688 	else
689 		output->gen2.active_num--;
690 
691 	spin_unlock_irqrestore(&vfe->output_lock, flags);
692 
693 	vb2_buffer_done(&ready_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
694 
695 	return;
696 
697 out_unlock:
698 	spin_unlock_irqrestore(&vfe->output_lock, flags);
699 }
700 
/*
 * vfe_pm_domain_off - Disable power domains specific to this VFE.
 * @vfe: VFE Device
 *
 * Intentionally empty: VFE v170 has no per-VFE power domain to control.
 */
static void vfe_pm_domain_off(struct vfe_device *vfe)
{
	/* nop */
}
709 
/*
 * vfe_pm_domain_on - Enable power domains specific to this VFE.
 * @vfe: VFE Device
 *
 * Intentionally a no-op for v170; always succeeds.
 *
 * Return 0
 */
static int vfe_pm_domain_on(struct vfe_device *vfe)
{
	return 0;
}
718 
719 /*
720  * vfe_queue_buffer - Add empty buffer
721  * @vid: Video device structure
722  * @buf: Buffer to be enqueued
723  *
724  * Add an empty buffer - depending on the current number of buffers it will be
725  * put in pending buffer queue or directly given to the hardware to be filled.
726  *
727  * Return 0 on success or a negative error code otherwise
728  */
vfe_queue_buffer(struct camss_video * vid,struct camss_buffer * buf)729 static int vfe_queue_buffer(struct camss_video *vid,
730 			    struct camss_buffer *buf)
731 {
732 	struct vfe_line *line = container_of(vid, struct vfe_line, video_out);
733 	struct vfe_device *vfe = to_vfe(line);
734 	struct vfe_output *output;
735 	unsigned long flags;
736 
737 	output = &line->output;
738 
739 	spin_lock_irqsave(&vfe->output_lock, flags);
740 
741 	if (output->state == VFE_OUTPUT_ON && output->gen2.active_num < 2) {
742 		output->buf[output->gen2.active_num++] = buf;
743 		vfe_wm_update(vfe, output->wm_idx[0], buf->addr[0], line);
744 	} else {
745 		vfe_buf_add_pending(output, buf);
746 	}
747 
748 	spin_unlock_irqrestore(&vfe->output_lock, flags);
749 
750 	return 0;
751 }
752 
/* ISR dispatch table for VFE v170; installed by vfe_subdev_init() */
static const struct vfe_isr_ops vfe_isr_ops_170 = {
	.reset_ack = vfe_isr_reset_ack,
	.halt_ack = vfe_isr_halt_ack,
	.reg_update = vfe_isr_reg_update,
	.sof = vfe_isr_sof,
	.comp_done = vfe_isr_comp_done,
	.wm_done = vfe_isr_wm_done,
};
761 
/* Video node ops for VFE v170; installed by vfe_subdev_init() */
static const struct camss_video_ops vfe_video_ops_170 = {
	.queue_buffer = vfe_queue_buffer,
	.flush_buffers = vfe_flush_buffers,
};
766 
/*
 * vfe_subdev_init - Install VFE v170 specific ops and line count
 * @dev: device (unused here)
 * @vfe: VFE device
 */
static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
{
	vfe->isr_ops = vfe_isr_ops_170;
	vfe->video_ops = vfe_video_ops_170;

	vfe->line_num = VFE_LINE_NUM_GEN2;
}
774 
/* Hardware operations for VFE v170, referenced by the camss core */
const struct vfe_hw_ops vfe_ops_170 = {
	.global_reset = vfe_global_reset,
	.hw_version = vfe_hw_version,
	.isr_read = vfe_isr_read,
	.isr = vfe_isr,
	.pm_domain_off = vfe_pm_domain_off,
	.pm_domain_on = vfe_pm_domain_on,
	.reg_update_clear = vfe_reg_update_clear,
	.reg_update = vfe_reg_update,
	.subdev_init = vfe_subdev_init,
	.vfe_disable = vfe_disable,
	.vfe_enable = vfe_enable,
	.vfe_halt = vfe_halt,
	.violation_read = vfe_violation_read,
};
790