// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
 * Synopsys DesignWare eDMA v0 core
 *
 * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
 */

#include <linux/bitfield.h>

#include "dw-edma-core.h"
#include "dw-edma-v0-core.h"
#include "dw-edma-v0-regs.h"
#include "dw-edma-v0-debugfs.h"

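/*
 * Channel control register bits. The expansions below follow the usual
 * DesignWare eDMA databook naming and are assumed, not spelled out in this
 * file: CB/TCB are the cycle bit and its toggle, used for producer/consumer
 * handshaking on the linked list; LLP marks a link element; LIE/RIE enable
 * the local/remote done interrupts; CCS is the consumer cycle state; LLE
 * enables linked-list mode.
 */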
enum dw_edma_control {
	DW_EDMA_V0_CB					= BIT(0),
	DW_EDMA_V0_TCB					= BIT(1),
	DW_EDMA_V0_LLP					= BIT(2),
	DW_EDMA_V0_LIE					= BIT(3),
	DW_EDMA_V0_RIE					= BIT(4),
	DW_EDMA_V0_CCS					= BIT(8),
	DW_EDMA_V0_LLE					= BIT(9),
};

static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
{
	return dw->rg_region.vaddr;
}

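/*
 * Register accessor helpers. The plain SET_32()/GET_32() pair touches a
 * common register, the _RW_ variants pick the wr_/rd_ copy of a register
 * based on the transfer direction, and the _BOTH_ variants program both
 * copies. For example, SET_RW_32(dw, EDMA_DIR_WRITE, int_mask, 0) expands
 * to writel(0, &__dw_regs(dw)->wr_int_mask).
 */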
#define SET_32(dw, name, value)				\
	writel(value, &(__dw_regs(dw)->name))

#define GET_32(dw, name)				\
	readl(&(__dw_regs(dw)->name))

#define SET_RW_32(dw, dir, name, value)			\
	do {						\
		if ((dir) == EDMA_DIR_WRITE)		\
			SET_32(dw, wr_##name, value);	\
		else					\
			SET_32(dw, rd_##name, value);	\
	} while (0)

#define GET_RW_32(dw, dir, name)			\
	((dir) == EDMA_DIR_WRITE			\
	  ? GET_32(dw, wr_##name)			\
	  : GET_32(dw, rd_##name))

#define SET_BOTH_32(dw, name, value)			\
	do {						\
		SET_32(dw, wr_##name, value);		\
		SET_32(dw, rd_##name, value);		\
	} while (0)

#ifdef CONFIG_64BIT

#define SET_64(dw, name, value)				\
	writeq(value, &(__dw_regs(dw)->name))

#define GET_64(dw, name)				\
	readq(&(__dw_regs(dw)->name))

#define SET_RW_64(dw, dir, name, value)			\
	do {						\
		if ((dir) == EDMA_DIR_WRITE)		\
			SET_64(dw, wr_##name, value);	\
		else					\
			SET_64(dw, rd_##name, value);	\
	} while (0)

#define GET_RW_64(dw, dir, name)			\
	((dir) == EDMA_DIR_WRITE			\
	  ? GET_64(dw, wr_##name)			\
	  : GET_64(dw, rd_##name))

#define SET_BOTH_64(dw, name, value)			\
	do {						\
		SET_64(dw, wr_##name, value);		\
		SET_64(dw, rd_##name, value);		\
	} while (0)

#endif /* CONFIG_64BIT */

#define SET_COMPAT(dw, name, value)			\
	writel(value, &(__dw_regs(dw)->type.unroll.name))

#define SET_RW_COMPAT(dw, dir, name, value)		\
	do {						\
		if ((dir) == EDMA_DIR_WRITE)		\
			SET_COMPAT(dw, wr_##name, value); \
		else					\
			SET_COMPAT(dw, rd_##name, value); \
	} while (0)

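/*
 * In the legacy map there is a single channel register window shared by
 * all channels (selected via the viewport, see below), whereas the
 * unrolled map exposes a dedicated wr/rd register copy per channel.
 */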
static inline struct dw_edma_v0_ch_regs __iomem *
__dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch)
{
	if (dw->mf == EDMA_MF_EDMA_LEGACY)
		return &(__dw_regs(dw)->type.legacy.ch);

	if (dir == EDMA_DIR_WRITE)
		return &__dw_regs(dw)->type.unroll.ch[ch].wr;

	return &__dw_regs(dw)->type.unroll.ch[ch].rd;
}

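/*
 * Legacy channel register accesses must select the target channel in the
 * viewport register first, so the select+access pair is serialized under
 * a raw spinlock; BIT(31) of the viewport selects the read-direction
 * bank. The unrolled map needs no selection and is accessed directly.
 */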
static inline void writel_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
			     u32 value, void __iomem *addr)
{
	if (dw->mf == EDMA_MF_EDMA_LEGACY) {
		u32 viewport_sel;
		unsigned long flags;

		raw_spin_lock_irqsave(&dw->lock, flags);

		viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
		if (dir == EDMA_DIR_READ)
			viewport_sel |= BIT(31);

		writel(viewport_sel,
		       &(__dw_regs(dw)->type.legacy.viewport_sel));
		writel(value, addr);

		raw_spin_unlock_irqrestore(&dw->lock, flags);
	} else {
		writel(value, addr);
	}
}

static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
			   const void __iomem *addr)
{
	u32 value;

	if (dw->mf == EDMA_MF_EDMA_LEGACY) {
		u32 viewport_sel;
		unsigned long flags;

		raw_spin_lock_irqsave(&dw->lock, flags);

		viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
		if (dir == EDMA_DIR_READ)
			viewport_sel |= BIT(31);

		writel(viewport_sel,
		       &(__dw_regs(dw)->type.legacy.viewport_sel));
		value = readl(addr);

		raw_spin_unlock_irqrestore(&dw->lock, flags);
	} else {
		value = readl(addr);
	}

	return value;
}

#define SET_CH_32(dw, dir, ch, name, value) \
	writel_ch(dw, dir, ch, value, &(__dw_ch_regs(dw, dir, ch)->name))

#define GET_CH_32(dw, dir, ch, name) \
	readl_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name))

#define SET_LL_32(ll, value) \
	writel(value, ll)

#ifdef CONFIG_64BIT

static inline void writeq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
			     u64 value, void __iomem *addr)
{
	if (dw->mf == EDMA_MF_EDMA_LEGACY) {
		u32 viewport_sel;
		unsigned long flags;

		raw_spin_lock_irqsave(&dw->lock, flags);

		viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
		if (dir == EDMA_DIR_READ)
			viewport_sel |= BIT(31);

		writel(viewport_sel,
		       &(__dw_regs(dw)->type.legacy.viewport_sel));
		writeq(value, addr);

		raw_spin_unlock_irqrestore(&dw->lock, flags);
	} else {
		writeq(value, addr);
	}
}

static inline u64 readq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
			   const void __iomem *addr)
{
	u64 value;

	if (dw->mf == EDMA_MF_EDMA_LEGACY) {
		u32 viewport_sel;
		unsigned long flags;

		raw_spin_lock_irqsave(&dw->lock, flags);

		viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
		if (dir == EDMA_DIR_READ)
			viewport_sel |= BIT(31);

		writel(viewport_sel,
		       &(__dw_regs(dw)->type.legacy.viewport_sel));
		value = readq(addr);

		raw_spin_unlock_irqrestore(&dw->lock, flags);
	} else {
		value = readq(addr);
	}

	return value;
}

#define SET_CH_64(dw, dir, ch, name, value) \
	writeq_ch(dw, dir, ch, value, &(__dw_ch_regs(dw, dir, ch)->name))

#define GET_CH_64(dw, dir, ch, name) \
	readq_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name))

#define SET_LL_64(ll, value) \
	writeq(value, ll)

#endif /* CONFIG_64BIT */

/* eDMA management callbacks */
void dw_edma_v0_core_off(struct dw_edma *dw)
{
	SET_BOTH_32(dw, int_mask,
		    EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK);
	SET_BOTH_32(dw, int_clear,
		    EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK);
	SET_BOTH_32(dw, engine_en, 0);
}

u16 dw_edma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)
{
	u32 num_ch;

	if (dir == EDMA_DIR_WRITE)
		num_ch = FIELD_GET(EDMA_V0_WRITE_CH_COUNT_MASK,
				   GET_32(dw, ctrl));
	else
		num_ch = FIELD_GET(EDMA_V0_READ_CH_COUNT_MASK,
				   GET_32(dw, ctrl));

	if (num_ch > EDMA_V0_MAX_NR_CH)
		num_ch = EDMA_V0_MAX_NR_CH;

	return (u16)num_ch;
}

enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan)
{
	struct dw_edma *dw = chan->chip->dw;
	u32 tmp;

	tmp = FIELD_GET(EDMA_V0_CH_STATUS_MASK,
			GET_CH_32(dw, chan->dir, chan->id, ch_control1));

	if (tmp == 1)
		return DMA_IN_PROGRESS;	/* channel status: running */
	else if (tmp == 3)
		return DMA_COMPLETE;	/* channel status: stopped (done) */
	else
		return DMA_ERROR;
}

void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan)
{
	struct dw_edma *dw = chan->chip->dw;

	SET_RW_32(dw, chan->dir, int_clear,
		  FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id)));
}

void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan)
{
	struct dw_edma *dw = chan->chip->dw;

	SET_RW_32(dw, chan->dir, int_clear,
		  FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id)));
}

u32 dw_edma_v0_core_status_done_int(struct dw_edma *dw, enum dw_edma_dir dir)
{
	return FIELD_GET(EDMA_V0_DONE_INT_MASK,
			 GET_RW_32(dw, dir, int_status));
}

u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir)
{
	return FIELD_GET(EDMA_V0_ABORT_INT_MASK,
			 GET_RW_32(dw, dir, int_status));
}

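/*
 * Lay out the chunk in the linked-list region: one element per burst,
 * with LIE/RIE set on the last data element so the engine raises a done
 * interrupt at the end, followed by a link element (LLP | TCB) pointing
 * back to the start of the list. The cycle bit (CB) on the data elements
 * and its inversion on the link element implement the producer/consumer
 * handshake sketched for the control bits above.
 */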
static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
{
	struct dw_edma_burst *child;
	struct dw_edma_v0_lli __iomem *lli;
	struct dw_edma_v0_llp __iomem *llp;
	u32 control = 0, i = 0;
	int j;

	lli = chunk->ll_region.vaddr;

	if (chunk->cb)
		control = DW_EDMA_V0_CB;

	j = chunk->bursts_alloc;
	list_for_each_entry(child, &chunk->burst->list, list) {
		j--;
		if (!j)
			control |= (DW_EDMA_V0_LIE | DW_EDMA_V0_RIE);

		/* Channel control */
		SET_LL_32(&lli[i].control, control);
		/* Transfer size */
		SET_LL_32(&lli[i].transfer_size, child->sz);
		/* SAR */
		#ifdef CONFIG_64BIT
			SET_LL_64(&lli[i].sar.reg, child->sar);
		#else /* CONFIG_64BIT */
			SET_LL_32(&lli[i].sar.lsb, lower_32_bits(child->sar));
			SET_LL_32(&lli[i].sar.msb, upper_32_bits(child->sar));
		#endif /* CONFIG_64BIT */
		/* DAR */
		#ifdef CONFIG_64BIT
			SET_LL_64(&lli[i].dar.reg, child->dar);
		#else /* CONFIG_64BIT */
			SET_LL_32(&lli[i].dar.lsb, lower_32_bits(child->dar));
			SET_LL_32(&lli[i].dar.msb, upper_32_bits(child->dar));
		#endif /* CONFIG_64BIT */
		i++;
	}

	llp = (void __iomem *)&lli[i];
	control = DW_EDMA_V0_LLP | DW_EDMA_V0_TCB;
	if (!chunk->cb)
		control |= DW_EDMA_V0_CB;

	/* Channel control */
	SET_LL_32(&llp->control, control);
	/* Linked list */
	#ifdef CONFIG_64BIT
		SET_LL_64(&llp->llp.reg, chunk->ll_region.paddr);
	#else /* CONFIG_64BIT */
		SET_LL_32(&llp->llp.lsb, lower_32_bits(chunk->ll_region.paddr));
		SET_LL_32(&llp->llp.msb, upper_32_bits(chunk->ll_region.paddr));
	#endif /* CONFIG_64BIT */
}

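/*
 * Kick off a chunk. The first chunk of a transfer also has to bring the
 * channel up: enable the engine (plus the per-channel power enable in
 * HDMA compat mode), unmask the done/abort interrupts, enable linked-list
 * error reporting and point the channel at the list. Every chunk then
 * rings the doorbell to (re)start fetching.
 */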
void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
{
	struct dw_edma_chan *chan = chunk->chan;
	struct dw_edma *dw = chan->chip->dw;
	u32 tmp;

	dw_edma_v0_core_write_chunk(chunk);

	if (first) {
		/* Enable engine */
		SET_RW_32(dw, chan->dir, engine_en, BIT(0));
		if (dw->mf == EDMA_MF_HDMA_COMPAT) {
			switch (chan->id) {
			case 0:
				SET_RW_COMPAT(dw, chan->dir, ch0_pwr_en,
					      BIT(0));
				break;
			case 1:
				SET_RW_COMPAT(dw, chan->dir, ch1_pwr_en,
					      BIT(0));
				break;
			case 2:
				SET_RW_COMPAT(dw, chan->dir, ch2_pwr_en,
					      BIT(0));
				break;
			case 3:
				SET_RW_COMPAT(dw, chan->dir, ch3_pwr_en,
					      BIT(0));
				break;
			case 4:
				SET_RW_COMPAT(dw, chan->dir, ch4_pwr_en,
					      BIT(0));
				break;
			case 5:
				SET_RW_COMPAT(dw, chan->dir, ch5_pwr_en,
					      BIT(0));
				break;
			case 6:
				SET_RW_COMPAT(dw, chan->dir, ch6_pwr_en,
					      BIT(0));
				break;
			case 7:
				SET_RW_COMPAT(dw, chan->dir, ch7_pwr_en,
					      BIT(0));
				break;
			}
		}
		/* Interrupt unmask - done, abort */
		tmp = GET_RW_32(dw, chan->dir, int_mask);
		tmp &= ~FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id));
		tmp &= ~FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id));
		SET_RW_32(dw, chan->dir, int_mask, tmp);
		/* Linked list error */
		tmp = GET_RW_32(dw, chan->dir, linked_list_err_en);
		tmp |= FIELD_PREP(EDMA_V0_LINKED_LIST_ERR_MASK, BIT(chan->id));
		SET_RW_32(dw, chan->dir, linked_list_err_en, tmp);
		/* Channel control */
		SET_CH_32(dw, chan->dir, chan->id, ch_control1,
			  (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
		/* Linked list */
		#ifdef CONFIG_64BIT
			SET_CH_64(dw, chan->dir, chan->id, llp.reg,
				  chunk->ll_region.paddr);
		#else /* CONFIG_64BIT */
			SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
				  lower_32_bits(chunk->ll_region.paddr));
			SET_CH_32(dw, chan->dir, chan->id, llp.msb,
				  upper_32_bits(chunk->ll_region.paddr));
		#endif /* CONFIG_64BIT */
	}
	/* Doorbell */
	SET_RW_32(dw, chan->dir, doorbell,
		  FIELD_PREP(EDMA_V0_DOORBELL_CH_MASK, chan->id));
}

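/*
 * Program the per-channel interrupt memory writes ("imwr"): on done or
 * abort the engine writes the channel's MSI data to the MSI address,
 * which is how remote interrupts get delivered. Each imwr data register
 * is shared by a channel pair, with the even channel's data in one half
 * and the odd channel's in the other, hence the read-modify-write below.
 */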
int dw_edma_v0_core_device_config(struct dw_edma_chan *chan)
{
	struct dw_edma *dw = chan->chip->dw;
	u32 tmp = 0;

	/* MSI done addr - low, high */
	SET_RW_32(dw, chan->dir, done_imwr.lsb, chan->msi.address_lo);
	SET_RW_32(dw, chan->dir, done_imwr.msb, chan->msi.address_hi);
	/* MSI abort addr - low, high */
	SET_RW_32(dw, chan->dir, abort_imwr.lsb, chan->msi.address_lo);
	SET_RW_32(dw, chan->dir, abort_imwr.msb, chan->msi.address_hi);
	/* MSI data - low, high */
	switch (chan->id) {
	case 0:
	case 1:
		tmp = GET_RW_32(dw, chan->dir, ch01_imwr_data);
		break;

	case 2:
	case 3:
		tmp = GET_RW_32(dw, chan->dir, ch23_imwr_data);
		break;

	case 4:
	case 5:
		tmp = GET_RW_32(dw, chan->dir, ch45_imwr_data);
		break;

	case 6:
	case 7:
		tmp = GET_RW_32(dw, chan->dir, ch67_imwr_data);
		break;
	}

	if (chan->id & BIT(0)) {
		/* Channel odd {1, 3, 5, 7} */
		tmp &= EDMA_V0_CH_EVEN_MSI_DATA_MASK;
		tmp |= FIELD_PREP(EDMA_V0_CH_ODD_MSI_DATA_MASK,
				  chan->msi.data);
	} else {
		/* Channel even {0, 2, 4, 6} */
		tmp &= EDMA_V0_CH_ODD_MSI_DATA_MASK;
		tmp |= FIELD_PREP(EDMA_V0_CH_EVEN_MSI_DATA_MASK,
				  chan->msi.data);
	}

	switch (chan->id) {
	case 0:
	case 1:
		SET_RW_32(dw, chan->dir, ch01_imwr_data, tmp);
		break;

	case 2:
	case 3:
		SET_RW_32(dw, chan->dir, ch23_imwr_data, tmp);
		break;

	case 4:
	case 5:
		SET_RW_32(dw, chan->dir, ch45_imwr_data, tmp);
		break;

	case 6:
	case 7:
		SET_RW_32(dw, chan->dir, ch67_imwr_data, tmp);
		break;
	}

	return 0;
}

/* eDMA debugfs callbacks */
void dw_edma_v0_core_debugfs_on(struct dw_edma_chip *chip)
{
	dw_edma_v0_debugfs_on(chip);
}

void dw_edma_v0_core_debugfs_off(struct dw_edma_chip *chip)
{
	dw_edma_v0_debugfs_off(chip);
}