1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2016-2017, Linaro Limited
4  * Copyright (c) 2014, STMicroelectronics International N.V.
5  */
6 
7 #include <arm.h>
8 #include <assert.h>
9 #include <config.h>
10 #include <drivers/gic.h>
11 #include <keep.h>
12 #include <kernel/dt.h>
13 #include <kernel/interrupt.h>
14 #include <kernel/panic.h>
15 #include <libfdt.h>
16 #include <util.h>
17 #include <io.h>
18 #include <trace.h>
19 
20 /* Offsets from gic.gicc_base */
21 #define GICC_CTLR		(0x000)
22 #define GICC_PMR		(0x004)
23 #define GICC_IAR		(0x00C)
24 #define GICC_EOIR		(0x010)
25 
26 #define GICC_CTLR_ENABLEGRP0	(1 << 0)
27 #define GICC_CTLR_ENABLEGRP1	(1 << 1)
28 #define GICD_CTLR_ENABLEGRP1S	(1 << 2)
29 #define GICC_CTLR_FIQEN		(1 << 3)
30 
31 /* Offsets from gic.gicd_base */
32 #define GICD_CTLR		(0x000)
33 #define GICD_TYPER		(0x004)
34 #define GICD_IGROUPR(n)		(0x080 + (n) * 4)
35 #define GICD_ISENABLER(n)	(0x100 + (n) * 4)
36 #define GICD_ICENABLER(n)	(0x180 + (n) * 4)
37 #define GICD_ISPENDR(n)		(0x200 + (n) * 4)
38 #define GICD_ICPENDR(n)		(0x280 + (n) * 4)
39 #define GICD_IPRIORITYR(n)	(0x400 + (n) * 4)
40 #define GICD_ITARGETSR(n)	(0x800 + (n) * 4)
41 #define GICD_IGROUPMODR(n)	(0xd00 + (n) * 4)
42 #define GICD_SGIR		(0xF00)
43 
44 #define GICD_CTLR_ENABLEGRP0	(1 << 0)
45 #define GICD_CTLR_ENABLEGRP1	(1 << 1)
46 
47 /* Number of Private Peripheral Interrupt */
48 #define NUM_PPI	32
49 
50 /* Number of Software Generated Interrupt */
51 #define NUM_SGI			16
52 
53 /* Number of Non-secure Software Generated Interrupt */
54 #define NUM_NS_SGI		8
55 
56 /* Number of interrupts in one register */
57 #define NUM_INTS_PER_REG	32
58 
59 /* Number of targets in one register */
60 #define NUM_TARGETS_PER_REG	4
61 
62 /* Accessors to access ITARGETSRn */
63 #define ITARGETSR_FIELD_BITS	8
64 #define ITARGETSR_FIELD_MASK	0xff
65 
/* Maximum number of interrupts a GIC can support */
67 #define GIC_MAX_INTS		1020
68 
69 #define GICC_IAR_IT_ID_MASK	0x3ff
70 #define GICC_IAR_CPU_ID_MASK	0x7
71 #define GICC_IAR_CPU_ID_SHIFT	10
72 
/* Forward declarations for the itr_chip operation callbacks below */
static void gic_op_add(struct itr_chip *chip, size_t it, uint32_t type,
		       uint32_t prio);
static void gic_op_enable(struct itr_chip *chip, size_t it);
static void gic_op_disable(struct itr_chip *chip, size_t it);
static void gic_op_raise_pi(struct itr_chip *chip, size_t it);
static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
			uint8_t cpu_mask);
static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
			uint8_t cpu_mask);

/* Interrupt chip operations exposed to the generic interrupt framework */
static const struct itr_ops gic_ops = {
	.add = gic_op_add,
	.enable = gic_op_enable,
	.disable = gic_op_disable,
	.raise_pi = gic_op_raise_pi,
	.raise_sgi = gic_op_raise_sgi,
	.set_affinity = gic_op_set_affinity,
};
DECLARE_KEEP_PAGER(gic_ops);
92 
/*
 * Probe the ID of the highest implemented interrupt.
 *
 * Walks GICD_ISENABLERn from the top down, writing all-ones and reading
 * back which enable bits stick (bits for unimplemented interrupts read as
 * zero), then restores the previous enable state through GICD_ICENABLERn.
 * The CPU interface is disabled around the probe so no interrupt is
 * delivered while lines are briefly enabled.
 *
 * Returns the largest implemented interrupt ID, or 0 if none was found.
 */
static size_t probe_max_it(vaddr_t gicc_base __maybe_unused, vaddr_t gicd_base)
{
	int i;
	uint32_t old_ctlr;
	size_t ret = 0;
	/* Index of the last GICD_ISENABLERn register covering GIC_MAX_INTS */
	const size_t max_regs = ((GIC_MAX_INTS + NUM_INTS_PER_REG - 1) /
					NUM_INTS_PER_REG) - 1;

	/*
	 * Probe which interrupt number is the largest.
	 */
#if defined(CFG_ARM_GICV3)
	old_ctlr = read_icc_ctlr();
	write_icc_ctlr(0);
#else
	old_ctlr = io_read32(gicc_base + GICC_CTLR);
	io_write32(gicc_base + GICC_CTLR, 0);
#endif
	for (i = max_regs; i >= 0; i--) {
		uint32_t old_reg;
		uint32_t reg;
		int b;

		old_reg = io_read32(gicd_base + GICD_ISENABLER(i));
		io_write32(gicd_base + GICD_ISENABLER(i), 0xffffffff);
		reg = io_read32(gicd_base + GICD_ISENABLER(i));
		/* Clear again the bits that were not enabled before */
		io_write32(gicd_base + GICD_ICENABLER(i), ~old_reg);
		/* Highest set bit in this register is the answer */
		for (b = NUM_INTS_PER_REG - 1; b >= 0; b--) {
			if (BIT32(b) & reg) {
				ret = i * NUM_INTS_PER_REG + b;
				goto out;
			}
		}
	}
out:
#if defined(CFG_ARM_GICV3)
	write_icc_ctlr(old_ctlr);
#else
	io_write32(gicc_base + GICC_CTLR, old_ctlr);
#endif
	return ret;
}
135 
/*
 * Per-CPU GIC setup for a secondary core: configure the banked SGI/PPI
 * group assignment and enable the calling core's CPU interface.
 * Must run on each core; GICD_IGROUPR(0), GICC_* and ICC_* registers
 * are banked per CPU.
 */
void gic_cpu_init(struct gic_data *gd)
{
#if defined(CFG_ARM_GICV3)
	assert(gd->gicd_base);
#else
	assert(gd->gicd_base && gd->gicc_base);
#endif

	/* per-CPU interrupts config:
	 * ID0-ID7(SGI)   for Non-secure interrupts
	 * ID8-ID15(SGI)  for Secure interrupts.
	 * All PPI config as Non-secure interrupts.
	 */
	io_write32(gd->gicd_base + GICD_IGROUPR(0), 0xffff00ff);

	/* Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_igrpen1(1);
#else
	io_write32(gd->gicc_base + GICC_PMR, 0x80);

	/* Enable GIC: both groups, with group 0 signalled as FIQ */
	io_write32(gd->gicc_base + GICC_CTLR,
		   GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1 |
		   GICC_CTLR_FIQEN);
#endif
}
166 
/*
 * Full GIC initialization (boot core): record base addresses, mask and
 * clear all interrupts, assign them to the Non-secure group by default,
 * then enable the distributor and this core's CPU interface.
 */
void gic_init(struct gic_data *gd, vaddr_t gicc_base __maybe_unused,
	      vaddr_t gicd_base)
{
	size_t n;

	gic_init_base_addr(gd, gicc_base, gicd_base);

	/* Walk every implemented GICD register bank */
	for (n = 0; n <= gd->max_it / NUM_INTS_PER_REG; n++) {
		/* Disable interrupts */
		io_write32(gd->gicd_base + GICD_ICENABLER(n), 0xffffffff);

		/* Make interrupts non-pending */
		io_write32(gd->gicd_base + GICD_ICPENDR(n), 0xffffffff);

		/* Mark interrupts non-secure */
		if (n == 0) {
			/* per-CPU interrupts config:
			 * ID0-ID7(SGI)   for Non-secure interrupts
			 * ID8-ID15(SGI)  for Secure interrupts.
			 * All PPI config as Non-secure interrupts.
			 */
			io_write32(gd->gicd_base + GICD_IGROUPR(n), 0xffff00ff);
		} else {
			io_write32(gd->gicd_base + GICD_IGROUPR(n), 0xffffffff);
		}
	}

	/* Set the priority mask to permit Non-secure interrupts, and to
	 * allow the Non-secure world to adjust the priority mask itself
	 */
#if defined(CFG_ARM_GICV3)
	write_icc_pmr(0x80);
	write_icc_igrpen1(1);
	io_setbits32(gd->gicd_base + GICD_CTLR, GICD_CTLR_ENABLEGRP1S);
#else
	io_write32(gd->gicc_base + GICC_PMR, 0x80);

	/* Enable GIC */
	io_write32(gd->gicc_base + GICC_CTLR, GICC_CTLR_FIQEN |
		   GICC_CTLR_ENABLEGRP0 | GICC_CTLR_ENABLEGRP1);
	io_setbits32(gd->gicd_base + GICD_CTLR,
		     GICD_CTLR_ENABLEGRP0 | GICD_CTLR_ENABLEGRP1);
#endif
}
211 
/*
 * Translate a device tree "interrupts" specifier into a GIC interrupt ID.
 *
 * @properties: raw DT cells (big-endian); cell 0 is the interrupt class
 *              (0 = SPI, 1 = PPI), cell 1 the class-relative number.
 * @count:      number of cells available (at least 2 required).
 * @type/@prio: optional outputs; this binding carries neither, so they
 *              are set to IRQ_TYPE_NONE and 0 when non-NULL.
 *
 * Returns the absolute interrupt ID or DT_INFO_INVALID_INTERRUPT.
 */
static int gic_dt_get_irq(const uint32_t *properties, int count, uint32_t *type,
			  uint32_t *prio)
{
	uint32_t it_class = 0;
	int it_num = DT_INFO_INVALID_INTERRUPT;

	if (type)
		*type = IRQ_TYPE_NONE;

	if (prio)
		*prio = 0;

	if (!properties || count < 2)
		return DT_INFO_INVALID_INTERRUPT;

	it_class = fdt32_to_cpu(properties[0]);
	it_num = fdt32_to_cpu(properties[1]);

	if (it_class == 0)
		it_num += 32;	/* SPI: hardware IDs start at 32 */
	else if (it_class == 1)
		it_num += 16;	/* PPI: hardware IDs start at 16 */
	else
		it_num = DT_INFO_INVALID_INTERRUPT;

	return it_num;
}
241 
/*
 * Record the GIC base addresses, probe the highest implemented interrupt
 * ID and hook up the interrupt chip operations (plus the device tree
 * translation callback when CFG_DT is enabled). Does not touch interrupt
 * enable/group state; see gic_init() for that.
 */
void gic_init_base_addr(struct gic_data *gd, vaddr_t gicc_base __maybe_unused,
			vaddr_t gicd_base)
{
	gd->chip.ops = &gic_ops;
	gd->gicc_base = gicc_base;
	gd->gicd_base = gicd_base;
	gd->max_it = probe_max_it(gicc_base, gicd_base);

	if (IS_ENABLED(CFG_DT))
		gd->chip.dt_get_irq = gic_dt_get_irq;
}
253 
/*
 * Claim interrupt @it for the secure world: mask it, clear any pending
 * state and assign it to group 0 (and group 1S on GICv3).
 * Must run before gic_it_set_cpu_mask()/gic_it_set_prio(), which assert
 * the group-0 assignment made here.
 */
static void gic_it_add(struct gic_data *gd, size_t it)
{
	size_t idx = it / NUM_INTS_PER_REG;
	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);

	/* Disable the interrupt */
	io_write32(gd->gicd_base + GICD_ICENABLER(idx), mask);
	/* Make it non-pending */
	io_write32(gd->gicd_base + GICD_ICPENDR(idx), mask);
	/* Assign it to group0 */
	io_clrbits32(gd->gicd_base + GICD_IGROUPR(idx), mask);
#if defined(CFG_ARM_GICV3)
	/* Assign it to group1S */
	io_setbits32(gd->gicd_base + GICD_IGROUPMODR(idx), mask);
#endif
}
270 
/*
 * Route interrupt @it to the CPUs selected in @cpu_mask (one bit per
 * core) by rewriting its 8-bit field in the matching GICD_ITARGETSRn
 * register. The interrupt must already be assigned to (secure) group 0.
 */
static void gic_it_set_cpu_mask(struct gic_data *gd, size_t it,
				uint8_t cpu_mask)
{
	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);
	uint32_t target, target_shift;
	/* Each ITARGETSRn register holds 4 interrupts, one byte each */
	vaddr_t itargetsr = gd->gicd_base +
			    GICD_ITARGETSR(it / NUM_TARGETS_PER_REG);

	/* Assigned to group0 */
	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));

	/* Route it to selected CPUs: read-modify-write this it's byte */
	target = io_read32(itargetsr);
	target_shift = (it % NUM_TARGETS_PER_REG) * ITARGETSR_FIELD_BITS;
	target &= ~(ITARGETSR_FIELD_MASK << target_shift);
	target |= cpu_mask << target_shift;
	DMSG("cpu_mask: writing 0x%x to 0x%" PRIxVA, target, itargetsr);
	io_write32(itargetsr, target);
	DMSG("cpu_mask: 0x%x", io_read32(itargetsr));
}
292 
/*
 * Set the priority of interrupt @it (lower value = higher priority).
 * GICD_IPRIORITYRn is byte-accessible with one byte per interrupt, so a
 * single 8-bit write at offset @it suffices. The interrupt must already
 * be assigned to (secure) group 0.
 */
static void gic_it_set_prio(struct gic_data *gd, size_t it, uint8_t prio)
{
	size_t idx __maybe_unused = it / NUM_INTS_PER_REG;
	uint32_t mask __maybe_unused = 1 << (it % NUM_INTS_PER_REG);

	/* Assigned to group0 */
	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask));

	/* Set prio it to selected CPUs */
	DMSG("prio: writing 0x%x to 0x%" PRIxVA,
		prio, gd->gicd_base + GICD_IPRIORITYR(0) + it);
	io_write8(gd->gicd_base + GICD_IPRIORITYR(0) + it, prio);
}
306 
/* Unmask interrupt @it; it must already be assigned to (secure) group 0 */
static void gic_it_enable(struct gic_data *gd, size_t it)
{
	vaddr_t gicd = gd->gicd_base;
	size_t reg_idx = it / NUM_INTS_PER_REG;
	uint32_t bit = BIT32(it % NUM_INTS_PER_REG);

	/* The interrupt must be ours, i.e. in group 0 */
	assert(!(io_read32(gicd + GICD_IGROUPR(reg_idx)) & bit));

	/* Writing a set bit to GICD_ISENABLERn enables the line */
	io_write32(gicd + GICD_ISENABLER(reg_idx), bit);
}
319 
/* Mask interrupt @it; it must already be assigned to (secure) group 0 */
static void gic_it_disable(struct gic_data *gd, size_t it)
{
	size_t reg_idx = it / NUM_INTS_PER_REG;
	uint32_t bit = BIT32(it % NUM_INTS_PER_REG);

	/* The interrupt must be ours, i.e. in group 0 */
	assert(!(io_read32(gd->gicd_base + GICD_IGROUPR(reg_idx)) & bit));

	/* Writing a set bit to GICD_ICENABLERn disables the line */
	io_write32(gd->gicd_base + GICD_ICENABLER(reg_idx), bit);
}
331 
/* Software-raise peripheral interrupt @it via GICD_ISPENDRn */
static void gic_it_set_pending(struct gic_data *gd, size_t it)
{
	size_t reg_idx = it / NUM_INTS_PER_REG;
	uint32_t bit = BIT32(it % NUM_INTS_PER_REG);

	/* SGIs (IDs 0-15) cannot be raised this way */
	assert(it >= NUM_SGI);

	io_write32(gd->gicd_base + GICD_ISPENDR(reg_idx), bit);
}
343 
/*
 * Raise software generated interrupt @it on the cores in @cpu_mask.
 * @group selects NSATT: 1 targets the SGI as configured for group 1
 * (Non-secure), 0 as group 0 (Secure).
 */
static void gic_it_raise_sgi(struct gic_data *gd, size_t it,
		uint8_t cpu_mask, uint8_t group)
{
	uint32_t sgir = 0;

	/* Only SGI IDs (0-15) may be written to GICD_SGIR */
	assert(it < NUM_SGI);

	sgir = it & 0xf;			/* SGIINTID field */
	sgir |= SHIFT_U32(group & 0x1, 15);	/* NSATT field */
	sgir |= SHIFT_U32(cpu_mask & 0xff, 16);	/* CPUTargetList field */

	io_write32(gd->gicd_base + GICD_SGIR, sgir);
}
359 
/*
 * Acknowledge the highest-priority pending interrupt and return the raw
 * IAR value (interrupt ID plus, on GICv2, the source CPU for SGIs).
 */
static uint32_t gic_read_iar(struct gic_data *gd __maybe_unused)
{
#if defined(CFG_ARM_GICV3)
	return read_icc_iar1();
#else
	return io_read32(gd->gicc_base + GICC_IAR);
#endif
}
368 
/*
 * Signal end of interrupt. @eoir must be the unmodified value previously
 * returned by gic_read_iar() for this interrupt.
 */
static void gic_write_eoir(struct gic_data *gd __maybe_unused, uint32_t eoir)
{
#if defined(CFG_ARM_GICV3)
	write_icc_eoir1(eoir);
#else
	io_write32(gd->gicc_base + GICC_EOIR, eoir);
#endif
}
377 
gic_it_is_enabled(struct gic_data * gd,size_t it)378 static bool gic_it_is_enabled(struct gic_data *gd, size_t it)
379 {
380 	size_t idx = it / NUM_INTS_PER_REG;
381 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
382 	return !!(io_read32(gd->gicd_base + GICD_ISENABLER(idx)) & mask);
383 }
384 
gic_it_get_group(struct gic_data * gd,size_t it)385 static bool __maybe_unused gic_it_get_group(struct gic_data *gd, size_t it)
386 {
387 	size_t idx = it / NUM_INTS_PER_REG;
388 	uint32_t mask = 1 << (it % NUM_INTS_PER_REG);
389 	return !!(io_read32(gd->gicd_base + GICD_IGROUPR(idx)) & mask);
390 }
391 
gic_it_get_target(struct gic_data * gd,size_t it)392 static uint32_t __maybe_unused gic_it_get_target(struct gic_data *gd, size_t it)
393 {
394 	size_t reg_idx = it / NUM_TARGETS_PER_REG;
395 	uint32_t target_shift = (it % NUM_TARGETS_PER_REG) *
396 				ITARGETSR_FIELD_BITS;
397 	uint32_t target_mask = ITARGETSR_FIELD_MASK << target_shift;
398 	uint32_t target = io_read32(gd->gicd_base + GICD_ITARGETSR(reg_idx));
399 
400 	return (target & target_mask) >> target_shift;
401 }
402 
gic_dump_state(struct gic_data * gd)403 void gic_dump_state(struct gic_data *gd)
404 {
405 	int i;
406 
407 #if defined(CFG_ARM_GICV3)
408 	DMSG("GICC_CTLR: 0x%x", read_icc_ctlr());
409 #else
410 	DMSG("GICC_CTLR: 0x%x", io_read32(gd->gicc_base + GICC_CTLR));
411 #endif
412 	DMSG("GICD_CTLR: 0x%x", io_read32(gd->gicd_base + GICD_CTLR));
413 
414 	for (i = 0; i <= (int)gd->max_it; i++) {
415 		if (gic_it_is_enabled(gd, i)) {
416 			DMSG("irq%d: enabled, group:%d, target:%x", i,
417 			     gic_it_get_group(gd, i), gic_it_get_target(gd, i));
418 		}
419 	}
420 }
421 
gic_it_handle(struct gic_data * gd)422 void gic_it_handle(struct gic_data *gd)
423 {
424 	uint32_t iar;
425 	uint32_t id;
426 
427 	iar = gic_read_iar(gd);
428 	id = iar & GICC_IAR_IT_ID_MASK;
429 
430 	if (id <= gd->max_it)
431 		itr_handle(id);
432 	else
433 		DMSG("ignoring interrupt %" PRIu32, id);
434 
435 	gic_write_eoir(gd, iar);
436 }
437 
/*
 * itr_chip add hook: claim @it as a secure interrupt.
 * @type and @prio from the framework are not used by this driver.
 */
static void gic_op_add(struct itr_chip *chip, size_t it,
		       uint32_t type __unused,
		       uint32_t prio __unused)
{
	struct gic_data *gic = container_of(chip, struct gic_data, chip);

	/* Reject IDs beyond what the hardware implements */
	if (it > gic->max_it)
		panic();

	gic_it_add(gic, it);
	/* Deliver to all cores, with a fixed secure priority of 1 */
	gic_it_set_cpu_mask(gic, it, 0xff);
	gic_it_set_prio(gic, it, 0x1);
}
452 
/* itr_chip enable hook: unmask a previously added interrupt */
static void gic_op_enable(struct itr_chip *chip, size_t it)
{
	struct gic_data *gic = container_of(chip, struct gic_data, chip);

	/* Reject IDs beyond what the hardware implements */
	if (it > gic->max_it)
		panic();

	gic_it_enable(gic, it);
}
462 
/* itr_chip disable hook: mask a previously added interrupt */
static void gic_op_disable(struct itr_chip *chip, size_t it)
{
	struct gic_data *gic = container_of(chip, struct gic_data, chip);

	/* Reject IDs beyond what the hardware implements */
	if (it > gic->max_it)
		panic();

	gic_it_disable(gic, it);
}
472 
/* itr_chip raise_pi hook: software-raise a peripheral interrupt */
static void gic_op_raise_pi(struct itr_chip *chip, size_t it)
{
	struct gic_data *gic = container_of(chip, struct gic_data, chip);

	/* Reject IDs beyond what the hardware implements */
	if (it > gic->max_it)
		panic();

	gic_it_set_pending(gic, it);
}
482 
/* itr_chip raise_sgi hook: raise an SGI on the cores in @cpu_mask */
static void gic_op_raise_sgi(struct itr_chip *chip, size_t it,
			uint8_t cpu_mask)
{
	struct gic_data *gic = container_of(chip, struct gic_data, chip);

	/* Reject IDs beyond what the hardware implements */
	if (it > gic->max_it)
		panic();

	/* SGIs 0-7 are Non-secure (group 1), 8-15 Secure (group 0) */
	gic_it_raise_sgi(gic, it, cpu_mask, it < NUM_NS_SGI ? 1 : 0);
}
/* itr_chip set_affinity hook: reroute @it to the cores in @cpu_mask */
static void gic_op_set_affinity(struct itr_chip *chip, size_t it,
			uint8_t cpu_mask)
{
	struct gic_data *gic = container_of(chip, struct gic_data, chip);

	/* Reject IDs beyond what the hardware implements */
	if (it > gic->max_it)
		panic();

	gic_it_set_cpu_mask(gic, it, cpu_mask);
}
506