// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support for dynamic reconfiguration for PCI, Memory, and CPU
 * Hotplug and Dynamic Logical Partitioning on RPA platforms.
 *
 * Copyright (C) 2009 Nathan Fontenot
 * Copyright (C) 2009 IBM Corporation
 */

#define pr_fmt(fmt)	"dlpar: " fmt

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/of.h>

#include "of_helpers.h"
#include "pseries.h"

#include <asm/prom.h>
#include <asm/machdep.h>
#include <linux/uaccess.h>
#include <asm/rtas.h>

static struct workqueue_struct *pseries_hp_wq;

struct pseries_hp_work {
	struct work_struct work;
	struct pseries_hp_errorlog *errlog;
};

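/*
 * Work area used with the ibm,configure-connector RTAS call: the caller
 * fills in the DRC index (followed by a zero word), and the offsets into
 * the work area for the reported node name and property data are read
 * back after each call.
 */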
struct cc_workarea {
	__be32	drc_index;
	__be32	zero;
	__be32	name_offset;
	__be32	prop_length;
	__be32	prop_offset;
};

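/* Free a property allocated by dlpar_parse_cc_property(). */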
void dlpar_free_cc_property(struct property *prop)
{
	kfree(prop->name);
	kfree(prop->value);
	kfree(prop);
}

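/*
 * Build a struct property from the name and value currently described by
 * the configure-connector work area.  Returns NULL on allocation failure.
 */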
static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa)
{
	struct property *prop;
	char *name;
	char *value;

	prop = kzalloc(sizeof(*prop), GFP_KERNEL);
	if (!prop)
		return NULL;

	name = (char *)ccwa + be32_to_cpu(ccwa->name_offset);
	prop->name = kstrdup(name, GFP_KERNEL);
	if (!prop->name) {
		dlpar_free_cc_property(prop);
		return NULL;
	}

	prop->length = be32_to_cpu(ccwa->prop_length);
	value = (char *)ccwa + be32_to_cpu(ccwa->prop_offset);
	prop->value = kmemdup(value, prop->length, GFP_KERNEL);
	if (!prop->value) {
		dlpar_free_cc_property(prop);
		return NULL;
	}

	return prop;
}

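/*
 * Allocate and initialize a bare device node named from the
 * configure-connector work area.  Properties and parent/child/sibling
 * links are filled in by the caller.
 */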
static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa)
{
	struct device_node *dn;
	const char *name;

	dn = kzalloc(sizeof(*dn), GFP_KERNEL);
	if (!dn)
		return NULL;

	name = (const char *)ccwa + be32_to_cpu(ccwa->name_offset);
	dn->full_name = kstrdup(name, GFP_KERNEL);
	if (!dn->full_name) {
		kfree(dn);
		return NULL;
	}

	of_node_set_flag(dn, OF_DYNAMIC);
	of_node_init(dn);

	return dn;
}

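/* Free a single node and all of its properties. */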
static void dlpar_free_one_cc_node(struct device_node *dn)
{
	struct property *prop;

	while (dn->properties) {
		prop = dn->properties;
		dn->properties = prop->next;
		dlpar_free_cc_property(prop);
	}

	kfree(dn->full_name);
	kfree(dn);
}

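/*
 * Recursively free a device tree fragment returned by
 * dlpar_configure_connector(), including all children and siblings.
 */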
void dlpar_free_cc_nodes(struct device_node *dn)
{
	if (dn->child)
		dlpar_free_cc_nodes(dn->child);

	if (dn->sibling)
		dlpar_free_cc_nodes(dn->sibling);

	dlpar_free_one_cc_node(dn);
}

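/* Return status values from the ibm,configure-connector RTAS call */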
#define COMPLETE	0
#define NEXT_SIBLING    1
#define NEXT_CHILD      2
#define NEXT_PROPERTY   3
#define PREV_PARENT     4
#define MORE_MEMORY     5
#define ERR_CFG_USE     -9003

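/*
 * Repeatedly invoke the ibm,configure-connector RTAS call for drc_index,
 * assembling the nodes and properties it reports into a detached device
 * tree fragment parented under 'parent'.  Returns the first new node, or
 * NULL on failure; the caller is responsible for attaching the fragment
 * (dlpar_attach_node()) or freeing it (dlpar_free_cc_nodes()).
 */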
struct device_node *dlpar_configure_connector(__be32 drc_index,
					      struct device_node *parent)
{
	struct device_node *dn;
	struct device_node *first_dn = NULL;
	struct device_node *last_dn = NULL;
	struct property *property;
	struct property *last_property = NULL;
	struct cc_workarea *ccwa;
	char *data_buf;
	int cc_token;
	int rc = -1;

	cc_token = rtas_token("ibm,configure-connector");
	if (cc_token == RTAS_UNKNOWN_SERVICE)
		return NULL;

	data_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
	if (!data_buf)
		return NULL;

	ccwa = (struct cc_workarea *)&data_buf[0];
	ccwa->drc_index = drc_index;
	ccwa->zero = 0;

	do {
		/* Since we release the rtas_data_buf lock between configure
		 * connector calls we want to re-populate the rtas_data_buf
		 * with the contents of the previous call.
		 */
		spin_lock(&rtas_data_buf_lock);

		memcpy(rtas_data_buf, data_buf, RTAS_DATA_BUF_SIZE);
		rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL);
		memcpy(data_buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);

		spin_unlock(&rtas_data_buf_lock);

		if (rtas_busy_delay(rc))
			continue;

		switch (rc) {
		case COMPLETE:
			break;

		case NEXT_SIBLING:
			dn = dlpar_parse_cc_node(ccwa);
			if (!dn)
				goto cc_error;

			dn->parent = last_dn->parent;
			last_dn->sibling = dn;
			last_dn = dn;
			break;

		case NEXT_CHILD:
			dn = dlpar_parse_cc_node(ccwa);
			if (!dn)
				goto cc_error;

			if (!first_dn) {
				dn->parent = parent;
				first_dn = dn;
			} else {
				dn->parent = last_dn;
				if (last_dn)
					last_dn->child = dn;
			}

			last_dn = dn;
			break;

		case NEXT_PROPERTY:
			property = dlpar_parse_cc_property(ccwa);
			if (!property)
				goto cc_error;

			if (!last_dn->properties)
				last_dn->properties = property;
			else
				last_property->next = property;

			last_property = property;
			break;

		case PREV_PARENT:
			last_dn = last_dn->parent;
			break;

		case MORE_MEMORY:
		case ERR_CFG_USE:
		default:
			printk(KERN_ERR "Unexpected Error (%d) "
			       "returned from configure-connector\n", rc);
			goto cc_error;
		}
	} while (rc);

cc_error:
	kfree(data_buf);

	if (rc) {
		if (first_dn)
			dlpar_free_cc_nodes(first_dn);

		return NULL;
	}

	return first_dn;
}

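/*
 * Attach a device tree fragment built by dlpar_configure_connector()
 * under the given parent in the live device tree.
 */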
int dlpar_attach_node(struct device_node *dn, struct device_node *parent)
{
	int rc;

	dn->parent = parent;

	rc = of_attach_node(dn);
	if (rc) {
		printk(KERN_ERR "Failed to add device node %pOF\n", dn);
		return rc;
	}

	return 0;
}

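/*
 * Detach a node and all of its children from the live device tree and
 * drop the reference held on the detached node.
 */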
int dlpar_detach_node(struct device_node *dn)
{
	struct device_node *child;
	int rc;

	child = of_get_next_child(dn, NULL);
	while (child) {
		dlpar_detach_node(child);
		child = of_get_next_child(dn, child);
	}

	rc = of_detach_node(dn);
	if (rc)
		return rc;

	of_node_put(dn);

	return 0;
}

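/*
 * RTAS sensor and indicator tokens (and their values) used to query and
 * change the state of a dynamic reconfiguration connector (DRC).
 */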
#define DR_ENTITY_SENSE		9003
#define DR_ENTITY_PRESENT	1
#define DR_ENTITY_UNUSABLE	2
#define ALLOCATION_STATE	9003
#define ALLOC_UNUSABLE		0
#define ALLOC_USABLE		1
#define ISOLATION_STATE		9001
#define ISOLATE			0
#define UNISOLATE		1

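/*
 * Acquire a DRC for use by the partition: verify it is currently unusable,
 * mark its allocation state usable, then un-isolate it.  The allocation
 * state is rolled back if un-isolation fails.
 */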
int dlpar_acquire_drc(u32 drc_index)
{
	int dr_status, rc;

	rc = rtas_get_sensor(DR_ENTITY_SENSE, drc_index, &dr_status);
	if (rc || dr_status != DR_ENTITY_UNUSABLE)
		return -1;

	rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_USABLE);
	if (rc)
		return rc;

	rc = rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
	if (rc) {
		rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
		return rc;
	}

	return 0;
}

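/*
 * Release a DRC back to the platform: verify it is present, isolate it,
 * then mark its allocation state unusable.  The DRC is un-isolated again
 * if the allocation state change fails.
 */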
int dlpar_release_drc(u32 drc_index)
{
	int dr_status, rc;

	rc = rtas_get_sensor(DR_ENTITY_SENSE, drc_index, &dr_status);
	if (rc || dr_status != DR_ENTITY_PRESENT)
		return -1;

	rc = rtas_set_indicator(ISOLATION_STATE, drc_index, ISOLATE);
	if (rc)
		return rc;

	rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
	if (rc) {
		rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
		return rc;
	}

	return 0;
}

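/*
 * Un-isolate a DRC that is present, without changing its allocation
 * state.
 */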
int dlpar_unisolate_drc(u32 drc_index)
{
	int dr_status, rc;

	rc = rtas_get_sensor(DR_ENTITY_SENSE, drc_index, &dr_status);
	if (rc || dr_status != DR_ENTITY_PRESENT)
		return -1;

	rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);

	return 0;
}

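/*
 * Convert the identifier in a hotplug error log from big-endian to CPU
 * byte order and dispatch the request to the memory, CPU, or persistent
 * memory hotplug handler.
 */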
int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
{
	int rc;

	/* pseries error logs are in BE format, convert to cpu type */
	switch (hp_elog->id_type) {
	case PSERIES_HP_ELOG_ID_DRC_COUNT:
		hp_elog->_drc_u.drc_count =
				be32_to_cpu(hp_elog->_drc_u.drc_count);
		break;
	case PSERIES_HP_ELOG_ID_DRC_INDEX:
		hp_elog->_drc_u.drc_index =
				be32_to_cpu(hp_elog->_drc_u.drc_index);
		break;
	case PSERIES_HP_ELOG_ID_DRC_IC:
		hp_elog->_drc_u.ic.count =
				be32_to_cpu(hp_elog->_drc_u.ic.count);
		hp_elog->_drc_u.ic.index =
				be32_to_cpu(hp_elog->_drc_u.ic.index);
	}

	switch (hp_elog->resource) {
	case PSERIES_HP_ELOG_RESOURCE_MEM:
		rc = dlpar_memory(hp_elog);
		break;
	case PSERIES_HP_ELOG_RESOURCE_CPU:
		rc = dlpar_cpu(hp_elog);
		break;
	case PSERIES_HP_ELOG_RESOURCE_PMEM:
		rc = dlpar_hp_pmem(hp_elog);
		break;

	default:
		pr_warn_ratelimited("Invalid resource (%d) specified\n",
				    hp_elog->resource);
		rc = -EINVAL;
	}

	return rc;
}

static void pseries_hp_work_fn(struct work_struct *work)
{
	struct pseries_hp_work *hp_work =
			container_of(work, struct pseries_hp_work, work);

	handle_dlpar_errorlog(hp_work->errlog);

	kfree(hp_work->errlog);
	kfree((void *)work);
}

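/*
 * Queue a hotplug request for processing on the pseries hotplug
 * workqueue.  The error log is copied with GFP_ATOMIC since callers may
 * be running in a context that cannot sleep.
 */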
void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog)
{
	struct pseries_hp_work *work;
	struct pseries_hp_errorlog *hp_errlog_copy;

	hp_errlog_copy = kmemdup(hp_errlog, sizeof(*hp_errlog), GFP_ATOMIC);
	if (!hp_errlog_copy)
		return;

	work = kmalloc(sizeof(struct pseries_hp_work), GFP_ATOMIC);
	if (work) {
		INIT_WORK((struct work_struct *)work, pseries_hp_work_fn);
		work->errlog = hp_errlog_copy;
		queue_work(pseries_hp_wq, (struct work_struct *)work);
	} else {
		kfree(hp_errlog_copy);
	}
}

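/*
 * Helpers for parsing the "<resource> <action> <id_type> <id>" command
 * written to /sys/kernel/dlpar.  Each helper consumes one or more
 * space-separated tokens from the command string.
 */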
static int dlpar_parse_resource(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
	char *arg;

	arg = strsep(cmd, " ");
	if (!arg)
		return -EINVAL;

	if (sysfs_streq(arg, "memory")) {
		hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_MEM;
	} else if (sysfs_streq(arg, "cpu")) {
		hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_CPU;
	} else {
		pr_err("Invalid resource specified.\n");
		return -EINVAL;
	}

	return 0;
}

static int dlpar_parse_action(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
	char *arg;

	arg = strsep(cmd, " ");
	if (!arg)
		return -EINVAL;

	if (sysfs_streq(arg, "add")) {
		hp_elog->action = PSERIES_HP_ELOG_ACTION_ADD;
	} else if (sysfs_streq(arg, "remove")) {
		hp_elog->action = PSERIES_HP_ELOG_ACTION_REMOVE;
	} else {
		pr_err("Invalid action specified.\n");
		return -EINVAL;
	}

	return 0;
}

static int dlpar_parse_id_type(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
	char *arg;
	u32 count, index;

	arg = strsep(cmd, " ");
	if (!arg)
		return -EINVAL;

	if (sysfs_streq(arg, "indexed-count")) {
		hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_IC;
		arg = strsep(cmd, " ");
		if (!arg) {
			pr_err("No DRC count specified.\n");
			return -EINVAL;
		}

		if (kstrtou32(arg, 0, &count)) {
			pr_err("Invalid DRC count specified.\n");
			return -EINVAL;
		}

		arg = strsep(cmd, " ");
		if (!arg) {
			pr_err("No DRC Index specified.\n");
			return -EINVAL;
		}

		if (kstrtou32(arg, 0, &index)) {
			pr_err("Invalid DRC Index specified.\n");
			return -EINVAL;
		}

		hp_elog->_drc_u.ic.count = cpu_to_be32(count);
		hp_elog->_drc_u.ic.index = cpu_to_be32(index);
	} else if (sysfs_streq(arg, "index")) {
		hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_INDEX;
		arg = strsep(cmd, " ");
		if (!arg) {
			pr_err("No DRC Index specified.\n");
			return -EINVAL;
		}

		if (kstrtou32(arg, 0, &index)) {
			pr_err("Invalid DRC Index specified.\n");
			return -EINVAL;
		}

		hp_elog->_drc_u.drc_index = cpu_to_be32(index);
	} else if (sysfs_streq(arg, "count")) {
		hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_COUNT;
		arg = strsep(cmd, " ");
		if (!arg) {
			pr_err("No DRC count specified.\n");
			return -EINVAL;
		}

		if (kstrtou32(arg, 0, &count)) {
			pr_err("Invalid DRC count specified.\n");
			return -EINVAL;
		}

		hp_elog->_drc_u.drc_count = cpu_to_be32(count);
	} else {
		pr_err("Invalid id_type specified.\n");
		return -EINVAL;
	}

	return 0;
}

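/*
 * sysfs store handler for /sys/kernel/dlpar.  A request written here,
 * e.g. "cpu add index 0x1000" (index value purely illustrative), is
 * parsed into a struct pseries_hp_errorlog and handled synchronously.
 */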
static ssize_t dlpar_store(struct class *class, struct class_attribute *attr,
			   const char *buf, size_t count)
{
	struct pseries_hp_errorlog hp_elog;
	char *argbuf;
	char *args;
	int rc;

	args = argbuf = kstrdup(buf, GFP_KERNEL);
	if (!argbuf)
		return -ENOMEM;

	/*
	 * Parse out the request from the user, this will be in the form:
	 * <resource> <action> <id_type> <id>
	 */
	rc = dlpar_parse_resource(&args, &hp_elog);
	if (rc)
		goto dlpar_store_out;

	rc = dlpar_parse_action(&args, &hp_elog);
	if (rc)
		goto dlpar_store_out;

	rc = dlpar_parse_id_type(&args, &hp_elog);
	if (rc)
		goto dlpar_store_out;

	rc = handle_dlpar_errorlog(&hp_elog);

dlpar_store_out:
	kfree(argbuf);

	if (rc)
		pr_err("Could not handle DLPAR request \"%s\"\n", buf);

	return rc ? rc : count;
}

static ssize_t dlpar_show(struct class *class, struct class_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%s\n", "memory,cpu");
}

static CLASS_ATTR_RW(dlpar);

int __init dlpar_workqueue_init(void)
{
	if (pseries_hp_wq)
		return 0;

	pseries_hp_wq = alloc_workqueue("pseries hotplug workqueue",
			WQ_UNBOUND, 1);

	return pseries_hp_wq ? 0 : -ENOMEM;
}

static int __init dlpar_sysfs_init(void)
{
	int rc;

	rc = dlpar_workqueue_init();
	if (rc)
		return rc;

	return sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr);
}
machine_device_initcall(pseries, dlpar_sysfs_init);