// SPDX-License-Identifier: GPL-2.0+
/*
 * Generic Intel ACPI table generation
 *
 * Copyright (C) 2017 Intel Corp.
 * Copyright 2019 Google LLC
 *
 * Modified from coreboot src/soc/intel/common/block/acpi.c
 */

#include <common.h>
#include <bloblist.h>
#include <cpu.h>
#include <dm.h>
#include <acpi/acpigen.h>
#include <asm/acpigen.h>
#include <asm/acpi_table.h>
#include <asm/cpu.h>
#include <asm/cpu_common.h>
#include <asm/global_data.h>
#include <asm/intel_acpi.h>
#include <asm/ioapic.h>
#include <asm/mpspec.h>
#include <asm/smm.h>
#include <asm/turbo.h>
#include <asm/intel_gnvs.h>
#include <asm/arch/iomap.h>
#include <asm/arch/pm.h>
#include <asm/arch/systemagent.h>
#include <dm/acpi.h>
#include <linux/err.h>
#include <power/acpi_pmc.h>

u32 acpi_fill_mcfg(u32 current)
{
	/*
	 * PCI Segment Group 0, Start Bus Number 0; the End Bus Number is
	 * derived from the PCIe config-space size (one bus per MiB)
	 */
	current += acpi_create_mcfg_mmconfig((void *)current,
					     CONFIG_MMCONF_BASE_ADDRESS, 0, 0,
					     (CONFIG_SA_PCIEX_LENGTH >> 20)
					     - 1);
	return current;
}

static int acpi_sci_irq(void)
{
	int sci_irq = 9;
	uint scis;
	int ret;

	ret = arch_read_sci_irq_select();
	if (IS_ERR_VALUE(ret))
		return log_msg_ret("sci_irq", ret);
	scis = ret;
	scis &= SCI_IRQ_MASK;
	scis >>= SCI_IRQ_SHIFT;

	/* Determine how SCI is routed. */
	switch (scis) {
	case SCIS_IRQ9:
	case SCIS_IRQ10:
	case SCIS_IRQ11:
		sci_irq = scis - SCIS_IRQ9 + 9;
		break;
	case SCIS_IRQ20:
	case SCIS_IRQ21:
	case SCIS_IRQ22:
	case SCIS_IRQ23:
		sci_irq = scis - SCIS_IRQ20 + 20;
		break;
	default:
		log_warning("Invalid SCI route! Defaulting to IRQ9\n");
		sci_irq = 9;
		break;
	}

	log_debug("SCI is IRQ%d\n", sci_irq);

	return sci_irq;
}

static unsigned long acpi_madt_irq_overrides(unsigned long current)
{
	int sci = acpi_sci_irq();
	u16 flags = MP_IRQ_TRIGGER_LEVEL;

	if (sci < 0)
		return log_msg_ret("sci irq", sci);

	/* INT_SRC_OVR: route legacy IRQ0 to I/O APIC input 2 */
	current += acpi_create_madt_irqoverride((void *)current, 0, 0, 2, 0);

	flags |= arch_madt_sci_irq_polarity(sci);

	/* SCI */
	current +=
	    acpi_create_madt_irqoverride((void *)current, 0, sci, sci, flags);

	return current;
}

u32 acpi_fill_madt(u32 current)
{
	/* Local APICs */
	current += acpi_create_madt_lapics(current);

	/* IOAPIC */
	current += acpi_create_madt_ioapic((void *)current, 2, IO_APIC_ADDR, 0);

	return acpi_madt_irq_overrides(current);
}

void intel_acpi_fill_fadt(struct acpi_fadt *fadt)
{
	const u16 pmbase = IOMAP_ACPI_BASE;

	/* Use the FADT revision reported by acpi_get_table_revision() */
	fadt->header.revision = acpi_get_table_revision(ACPITAB_FADT);

	fadt->sci_int = acpi_sci_irq();
	fadt->smi_cmd = APM_CNT;
	fadt->acpi_enable = APM_CNT_ACPI_ENABLE;
	fadt->acpi_disable = APM_CNT_ACPI_DISABLE;
	fadt->s4bios_req = 0x0;
	fadt->pstate_cnt = 0;

	fadt->pm1a_evt_blk = pmbase + PM1_STS;
	fadt->pm1b_evt_blk = 0x0;
	fadt->pm1a_cnt_blk = pmbase + PM1_CNT;
	fadt->pm1b_cnt_blk = 0x0;

	fadt->gpe0_blk = pmbase + GPE0_STS;

	fadt->pm1_evt_len = 4;
	fadt->pm1_cnt_len = 2;

	/* GPE0 STS/EN pairs each 32 bits wide. */
	fadt->gpe0_blk_len = 2 * GPE0_REG_MAX * sizeof(uint32_t);

	fadt->flush_size = 0x400;	/* twice the cache size */
	fadt->flush_stride = 0x10;	/* cache line width */
	fadt->duty_offset = 1;
	fadt->day_alrm = 0xd;

	fadt->flags = ACPI_FADT_WBINVD | ACPI_FADT_C1_SUPPORTED |
	    ACPI_FADT_C2_MP_SUPPORTED | ACPI_FADT_SLEEP_BUTTON |
	    ACPI_FADT_RESET_REGISTER | ACPI_FADT_SEALED_CASE |
	    ACPI_FADT_S4_RTC_WAKE | ACPI_FADT_PLATFORM_CLOCK;

	fadt->reset_reg.space_id = 1;
	fadt->reset_reg.bit_width = 8;
	fadt->reset_reg.addrl = IO_PORT_RESET;
	fadt->reset_value = RST_CPU | SYS_RST;

	fadt->x_pm1a_evt_blk.space_id = 1;
	fadt->x_pm1a_evt_blk.bit_width = fadt->pm1_evt_len * 8;
	fadt->x_pm1a_evt_blk.addrl = pmbase + PM1_STS;

	fadt->x_pm1b_evt_blk.space_id = 1;

	fadt->x_pm1a_cnt_blk.space_id = 1;
	fadt->x_pm1a_cnt_blk.bit_width = fadt->pm1_cnt_len * 8;
	fadt->x_pm1a_cnt_blk.addrl = pmbase + PM1_CNT;

	fadt->x_pm1b_cnt_blk.space_id = 1;

	fadt->x_gpe1_blk.space_id = 1;
}

int intel_southbridge_write_acpi_tables(const struct udevice *dev,
					struct acpi_ctx *ctx)
{
	int ret;

	ret = acpi_write_dbg2_pci_uart(ctx, gd->cur_serial_dev,
				       ACPI_ACCESS_SIZE_DWORD_ACCESS);
	if (ret)
		return log_msg_ret("dbg2", ret);

	ret = acpi_write_hpet(ctx);
	if (ret)
		return log_msg_ret("hpet", ret);

	return 0;
}

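/*
 * Default (weak) implementations below; SoC-specific code can override
 * them to adjust the PM1_EN wake mask and to populate the global NVS
 * area.
 */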
__weak u32 acpi_fill_soc_wake(u32 generic_pm1_en,
			      const struct chipset_power_state *ps)
{
	return generic_pm1_en;
}

__weak int acpi_create_gnvs(struct acpi_global_nvs *gnvs)
{
	return 0;
}

int southbridge_inject_dsdt(const struct udevice *dev, struct acpi_ctx *ctx)
{
	struct acpi_global_nvs *gnvs;
	int ret;

	ret = bloblist_ensure_size(BLOBLISTT_ACPI_GNVS, sizeof(*gnvs), 0,
				   (void **)&gnvs);
	if (ret)
		return log_msg_ret("bloblist", ret);

	ret = acpi_create_gnvs(gnvs);
	if (ret)
		return log_msg_ret("gnvs", ret);

	/*
	 * TODO(sjg@chromium.org): tell SMI about it
	 * smm_setup_structures(gnvs, NULL, NULL);
	 */

	/* Publish the GNVS address to the DSDT as \NVSA */
	acpigen_write_scope(ctx, "\\");
	acpigen_write_name_dword(ctx, "NVSA", (uintptr_t)gnvs);
	acpigen_pop_len(ctx);

	return 0;
}

static int calculate_power(int tdp, int p1_ratio, int ratio)
{
	u32 m;
	u32 power;

	/*
	 * M = ((1.1 - ((p1_ratio - ratio) * 0.00625)) / 1.1) ^ 2
	 *
	 * Power = (ratio / p1_ratio) * m * tdp
	 */

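	/*
	 * The calculation below is done in fixed point: 1.1 is scaled to
	 * 110000 and 0.00625 to 625, so m ends up holding M scaled by
	 * 100000. The later divisions strip the scale factors back out,
	 * leaving the result in the same unit as tdp (mW).
	 */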
	m = (110000 - ((p1_ratio - ratio) * 625)) / 11;
	m = (m * m) / 1000;

	power = ((ratio * 100000 / p1_ratio) / 100);
	power *= (m / 100) * (tdp / 1000);
	power /= 1000;

	return power;
}

void generate_p_state_entries(struct acpi_ctx *ctx, int core,
			      int cores_per_package)
{
	int ratio_min, ratio_max, ratio_turbo, ratio_step;
	int coord_type, power_max, num_entries;
	int ratio, power, clock, clock_max;
	bool turbo;

	coord_type = cpu_get_coord_type();
	ratio_min = cpu_get_min_ratio();
	ratio_max = cpu_get_max_ratio();
	clock_max = (ratio_max * cpu_get_bus_clock_khz()) / 1000;
	turbo = (turbo_get_state() == TURBO_ENABLED);

	/* Calculate CPU TDP in mW */
	power_max = cpu_get_power_max();

	/* Write _PCT indicating use of FFixedHW */
	acpigen_write_empty_pct(ctx);

	/* Write _PPC with no limit on supported P-state */
	acpigen_write_ppc_nvs(ctx);
	/* Write PSD indicating configured coordination type */
	acpigen_write_psd_package(ctx, core, 1, coord_type);

	/* Add P-state entries in _PSS table */
	acpigen_write_name(ctx, "_PSS");

	/* Pick a ratio step that keeps the table within PSS_MAX_ENTRIES */
	ratio_step = PSS_RATIO_STEP;
	do {
		num_entries = ((ratio_max - ratio_min) / ratio_step) + 1;
		if (((ratio_max - ratio_min) % ratio_step) > 0)
			num_entries += 1;
		if (turbo)
			num_entries += 1;
		if (num_entries > PSS_MAX_ENTRIES)
			ratio_step += 1;
	} while (num_entries > PSS_MAX_ENTRIES);

	/* _PSS package count depends on Turbo */
	acpigen_write_package(ctx, num_entries);

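	/*
	 * Each entry's control and status value places the ratio in
	 * bits 15:8, matching the target-ratio field of IA32_PERF_CTL
	 * used by the FFixedHW interface declared in _PCT above.
	 */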
	/* P[T] is Turbo state if enabled */
	if (turbo) {
		ratio_turbo = cpu_get_max_turbo_ratio();

		/* Add entry for Turbo ratio */
		acpigen_write_pss_package(ctx, clock_max + 1,	/* MHz */
					  power_max,		/* mW */
					  PSS_LATENCY_TRANSITION,/* lat1 */
					  PSS_LATENCY_BUSMASTER,/* lat2 */
					  ratio_turbo << 8,	/* control */
					  ratio_turbo << 8);	/* status */
		num_entries -= 1;
	}

	/* First regular entry is max non-turbo ratio */
	acpigen_write_pss_package(ctx, clock_max,	/* MHz */
				  power_max,		/* mW */
				  PSS_LATENCY_TRANSITION,/* lat1 */
				  PSS_LATENCY_BUSMASTER,/* lat2 */
				  ratio_max << 8,	/* control */
				  ratio_max << 8);	/* status */
	num_entries -= 1;

	/* Generate the remaining entries, stepping down to ratio_min */
	for (ratio = ratio_min + ((num_entries - 1) * ratio_step);
	     ratio >= ratio_min; ratio -= ratio_step) {
		/* Calculate power at this ratio */
		power = calculate_power(power_max, ratio_max, ratio);
		clock = (ratio * cpu_get_bus_clock_khz()) / 1000;

		acpigen_write_pss_package(ctx, clock,		/* MHz */
					  power,		/* mW */
					  PSS_LATENCY_TRANSITION,/* lat1 */
					  PSS_LATENCY_BUSMASTER,/* lat2 */
					  ratio << 8,		/* control */
					  ratio << 8);		/* status */
	}
	/* Fix package length */
	acpigen_pop_len(ctx);
}

void generate_t_state_entries(struct acpi_ctx *ctx, int core,
			      int cores_per_package, struct acpi_tstate *entry,
			      int nentries)
{
	if (!nentries)
		return;

	/* Indicate SW_ALL coordination for T-states */
	acpigen_write_tsd_package(ctx, core, cores_per_package, SW_ALL);

	/* Indicate FixedHW so OS will use MSR */
	acpigen_write_empty_ptc(ctx);

	/* Set NVS controlled T-state limit */
	acpigen_write_tpc(ctx, "\\TLVL");

	/* Write TSS table for MSR access */
	acpigen_write_tss_package(ctx, entry, nentries);
}

int acpi_generate_cpu_header(struct acpi_ctx *ctx, int core_id,
			     const struct acpi_cstate *c_state_map,
			     int num_cstates)
{
	bool is_first = !core_id;

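	/*
	 * Only the first core is given the P_BLK I/O address and its
	 * 6-byte length; the remaining cores pass zero for both.
	 */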
	/* Generate processor \_PR.CPUx */
	acpigen_write_processor(ctx, core_id, is_first ? ACPI_BASE_ADDRESS : 0,
				is_first ? 6 : 0);

	/* Generate C-state tables */
	acpigen_write_cst_package(ctx, c_state_map, num_cstates);

	return 0;
}

int acpi_generate_cpu_package_final(struct acpi_ctx *ctx, int cores_per_package)
{
	/*
	 * PPKG is usually used for thermal management of the first and only
	 * package
	 */
	acpigen_write_processor_package(ctx, "PPKG", 0, cores_per_package);

	/* Add a method to notify processor nodes */
	acpigen_write_processor_cnot(ctx, cores_per_package);

	return 0;
}