// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support for dynamic reconfiguration for PCI, Memory, and CPU
 * Hotplug and Dynamic Logical Partitioning on RPA platforms.
 *
 * Copyright (C) 2009 Nathan Fontenot
 * Copyright (C) 2009 IBM Corporation
 */

#define pr_fmt(fmt)	"dlpar: " fmt

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/of.h>

#include "of_helpers.h"
#include "pseries.h"

#include <asm/prom.h>
#include <asm/machdep.h>
#include <linux/uaccess.h>
#include <asm/rtas.h>

static struct workqueue_struct *pseries_hp_wq;

struct pseries_hp_work {
	struct work_struct work;
	struct pseries_hp_errorlog *errlog;
};

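/*
 * Work area exchanged with the ibm,configure-connector RTAS call; the
 * offsets locate the returned node name and property data within the
 * work area buffer.
 */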
struct cc_workarea {
	__be32	drc_index;
	__be32	zero;
	__be32	name_offset;
	__be32	prop_length;
	__be32	prop_offset;
};

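/*
 * Free a property built by dlpar_parse_cc_property(), including the
 * duplicated name and value buffers.
 */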
void dlpar_free_cc_property(struct property *prop)
{
	kfree(prop->name);
	kfree(prop->value);
	kfree(prop);
}

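/*
 * Build a struct property from the current configure-connector work area.
 * The name and value are copied out so the property stays valid after the
 * RTAS data buffer is reused for the next call.
 */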
static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa)
{
	struct property *prop;
	char *name;
	char *value;

	prop = kzalloc(sizeof(*prop), GFP_KERNEL);
	if (!prop)
		return NULL;

	name = (char *)ccwa + be32_to_cpu(ccwa->name_offset);
	prop->name = kstrdup(name, GFP_KERNEL);
	if (!prop->name) {
		dlpar_free_cc_property(prop);
		return NULL;
	}

	prop->length = be32_to_cpu(ccwa->prop_length);
	value = (char *)ccwa + be32_to_cpu(ccwa->prop_offset);
	prop->value = kmemdup(value, prop->length, GFP_KERNEL);
	if (!prop->value) {
		dlpar_free_cc_property(prop);
		return NULL;
	}

	return prop;
}

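/*
 * Allocate a bare device node named from the configure-connector work area.
 * The node is flagged OF_DYNAMIC so the OF core can free it once it is
 * released.
 */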
static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa)
{
	struct device_node *dn;
	const char *name;

	dn = kzalloc(sizeof(*dn), GFP_KERNEL);
	if (!dn)
		return NULL;

	name = (const char *)ccwa + be32_to_cpu(ccwa->name_offset);
	dn->full_name = kstrdup(name, GFP_KERNEL);
	if (!dn->full_name) {
		kfree(dn);
		return NULL;
	}

	of_node_set_flag(dn, OF_DYNAMIC);
	of_node_init(dn);

	return dn;
}

static void dlpar_free_one_cc_node(struct device_node *dn)
{
	struct property *prop;

	while (dn->properties) {
		prop = dn->properties;
		dn->properties = prop->next;
		dlpar_free_cc_property(prop);
	}

	kfree(dn->full_name);
	kfree(dn);
}

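/*
 * Recursively free a device node fragment built by
 * dlpar_configure_connector(), including siblings, children and properties.
 */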
void dlpar_free_cc_nodes(struct device_node *dn)
{
	if (dn->child)
		dlpar_free_cc_nodes(dn->child);

	if (dn->sibling)
		dlpar_free_cc_nodes(dn->sibling);

	dlpar_free_one_cc_node(dn);
}

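/* Status values returned by the ibm,configure-connector RTAS call */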
#define COMPLETE	0
#define NEXT_SIBLING    1
#define NEXT_CHILD      2
#define NEXT_PROPERTY   3
#define PREV_PARENT     4
#define MORE_MEMORY     5
#define ERR_CFG_USE     -9003

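/*
 * Repeatedly invoke ibm,configure-connector for the connector identified
 * by drc_index, assembling the nodes and properties it returns into a
 * device tree fragment parented under 'parent'.  Returns the root of the
 * new fragment, or NULL on failure.
 */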
struct device_node *dlpar_configure_connector(__be32 drc_index,
					      struct device_node *parent)
{
	struct device_node *dn;
	struct device_node *first_dn = NULL;
	struct device_node *last_dn = NULL;
	struct property *property;
	struct property *last_property = NULL;
	struct cc_workarea *ccwa;
	char *data_buf;
	int cc_token;
	int rc = -1;

	cc_token = rtas_token("ibm,configure-connector");
	if (cc_token == RTAS_UNKNOWN_SERVICE)
		return NULL;

	data_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
	if (!data_buf)
		return NULL;

	ccwa = (struct cc_workarea *)&data_buf[0];
	ccwa->drc_index = drc_index;
	ccwa->zero = 0;

	do {
		/* Since we release the rtas_data_buf lock between configure
		 * connector calls we want to re-populate the rtas_data_buf
		 * with the contents of the previous call.
		 */
		spin_lock(&rtas_data_buf_lock);

		memcpy(rtas_data_buf, data_buf, RTAS_DATA_BUF_SIZE);
		rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL);
		memcpy(data_buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);

		spin_unlock(&rtas_data_buf_lock);

		if (rtas_busy_delay(rc))
			continue;

		switch (rc) {
		case COMPLETE:
			break;

		case NEXT_SIBLING:
			dn = dlpar_parse_cc_node(ccwa);
			if (!dn)
				goto cc_error;

			dn->parent = last_dn->parent;
			last_dn->sibling = dn;
			last_dn = dn;
			break;

		case NEXT_CHILD:
			dn = dlpar_parse_cc_node(ccwa);
			if (!dn)
				goto cc_error;

			if (!first_dn) {
				dn->parent = parent;
				first_dn = dn;
			} else {
				dn->parent = last_dn;
				if (last_dn)
					last_dn->child = dn;
			}

			last_dn = dn;
			break;

		case NEXT_PROPERTY:
			property = dlpar_parse_cc_property(ccwa);
			if (!property)
				goto cc_error;

			if (!last_dn->properties)
				last_dn->properties = property;
			else
				last_property->next = property;

			last_property = property;
			break;

		case PREV_PARENT:
			last_dn = last_dn->parent;
			break;

		case MORE_MEMORY:
		case ERR_CFG_USE:
		default:
			pr_err("Unexpected Error (%d) returned from configure-connector\n",
			       rc);
			goto cc_error;
		}
	} while (rc);

cc_error:
	kfree(data_buf);

	if (rc) {
		if (first_dn)
			dlpar_free_cc_nodes(first_dn);

		return NULL;
	}

	return first_dn;
}

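/*
 * Attach a node built by dlpar_configure_connector() beneath 'parent' in
 * the live device tree.
 */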
int dlpar_attach_node(struct device_node *dn, struct device_node *parent)
{
	int rc;

	dn->parent = parent;

	rc = of_attach_node(dn);
	if (rc) {
		pr_err("Failed to add device node %pOF\n", dn);
		return rc;
	}

	return 0;
}

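/*
 * Detach a device node, and recursively its children, from the live device
 * tree, dropping the references so OF_DYNAMIC nodes can be freed.
 */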
int dlpar_detach_node(struct device_node *dn)
{
	struct device_node *child;
	int rc;

	child = of_get_next_child(dn, NULL);
	while (child) {
		dlpar_detach_node(child);
		child = of_get_next_child(dn, child);
	}

	rc = of_detach_node(dn);
	if (rc)
		return rc;

	of_node_put(dn);

	return 0;
}

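/*
 * RTAS sensor and indicator tokens, and their values, used to transfer a
 * dynamic reconfiguration connector between the platform and this
 * partition.
 */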
#define DR_ENTITY_SENSE		9003
#define DR_ENTITY_PRESENT	1
#define DR_ENTITY_UNUSABLE	2
#define ALLOCATION_STATE	9003
#define ALLOC_UNUSABLE		0
#define ALLOC_USABLE		1
#define ISOLATION_STATE		9001
#define ISOLATE			0
#define UNISOLATE		1

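/*
 * Acquire ownership of a DRC for this partition: the connector must be
 * reported unusable by dr-entity-sense, after which it is set usable and
 * unisolated.
 */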
int dlpar_acquire_drc(u32 drc_index)
{
	int dr_status, rc;

	rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
		       DR_ENTITY_SENSE, drc_index);
	if (rc || dr_status != DR_ENTITY_UNUSABLE)
		return -1;

	rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_USABLE);
	if (rc)
		return rc;

	rc = rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
	if (rc) {
		rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
		return rc;
	}

	return 0;
}

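/*
 * Release a DRC back to the platform: the connector must be reported
 * present by dr-entity-sense, after which it is isolated and set unusable.
 */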
int dlpar_release_drc(u32 drc_index)
{
	int dr_status, rc;

	rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
		       DR_ENTITY_SENSE, drc_index);
	if (rc || dr_status != DR_ENTITY_PRESENT)
		return -1;

	rc = rtas_set_indicator(ISOLATION_STATE, drc_index, ISOLATE);
	if (rc)
		return rc;

	rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
	if (rc) {
		rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
		return rc;
	}

	return 0;
}

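/*
 * Dispatch a hotplug error log to the handler for the affected resource
 * (memory, CPU or persistent memory), converting the DRC identifier
 * fields from big-endian first.
 */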
int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
{
	int rc;

	/* pseries error logs are in BE format, convert to CPU endianness */
	switch (hp_elog->id_type) {
	case PSERIES_HP_ELOG_ID_DRC_COUNT:
		hp_elog->_drc_u.drc_count =
				be32_to_cpu(hp_elog->_drc_u.drc_count);
		break;
	case PSERIES_HP_ELOG_ID_DRC_INDEX:
		hp_elog->_drc_u.drc_index =
				be32_to_cpu(hp_elog->_drc_u.drc_index);
		break;
	case PSERIES_HP_ELOG_ID_DRC_IC:
		hp_elog->_drc_u.ic.count =
				be32_to_cpu(hp_elog->_drc_u.ic.count);
		hp_elog->_drc_u.ic.index =
				be32_to_cpu(hp_elog->_drc_u.ic.index);
	}

	switch (hp_elog->resource) {
	case PSERIES_HP_ELOG_RESOURCE_MEM:
		rc = dlpar_memory(hp_elog);
		break;
	case PSERIES_HP_ELOG_RESOURCE_CPU:
		rc = dlpar_cpu(hp_elog);
		break;
	case PSERIES_HP_ELOG_RESOURCE_PMEM:
		rc = dlpar_hp_pmem(hp_elog);
		break;

	default:
		pr_warn_ratelimited("Invalid resource (%d) specified\n",
				    hp_elog->resource);
		rc = -EINVAL;
	}

	return rc;
}

static void pseries_hp_work_fn(struct work_struct *work)
{
	struct pseries_hp_work *hp_work =
			container_of(work, struct pseries_hp_work, work);

	handle_dlpar_errorlog(hp_work->errlog);

	kfree(hp_work->errlog);
	kfree((void *)work);
}

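/*
 * Queue a hotplug error log for processing in workqueue context.  The log
 * is copied with GFP_ATOMIC so callers may run in atomic context; if the
 * copy or the work item cannot be allocated, the event is dropped.
 */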
void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog)
{
	struct pseries_hp_work *work;
	struct pseries_hp_errorlog *hp_errlog_copy;

	hp_errlog_copy = kmemdup(hp_errlog, sizeof(*hp_errlog), GFP_ATOMIC);
	if (!hp_errlog_copy)
		return;

	work = kmalloc(sizeof(struct pseries_hp_work), GFP_ATOMIC);
	if (work) {
		INIT_WORK((struct work_struct *)work, pseries_hp_work_fn);
		work->errlog = hp_errlog_copy;
		queue_work(pseries_hp_wq, (struct work_struct *)work);
	} else {
		kfree(hp_errlog_copy);
	}
}

static int dlpar_parse_resource(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
	char *arg;

	arg = strsep(cmd, " ");
	if (!arg)
		return -EINVAL;

	if (sysfs_streq(arg, "memory")) {
		hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_MEM;
	} else if (sysfs_streq(arg, "cpu")) {
		hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_CPU;
	} else {
		pr_err("Invalid resource specified.\n");
		return -EINVAL;
	}

	return 0;
}

static int dlpar_parse_action(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
	char *arg;

	arg = strsep(cmd, " ");
	if (!arg)
		return -EINVAL;

	if (sysfs_streq(arg, "add")) {
		hp_elog->action = PSERIES_HP_ELOG_ACTION_ADD;
	} else if (sysfs_streq(arg, "remove")) {
		hp_elog->action = PSERIES_HP_ELOG_ACTION_REMOVE;
	} else {
		pr_err("Invalid action specified.\n");
		return -EINVAL;
	}

	return 0;
}

static int dlpar_parse_id_type(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
	char *arg;
	u32 count, index;

	arg = strsep(cmd, " ");
	if (!arg)
		return -EINVAL;

	if (sysfs_streq(arg, "indexed-count")) {
		hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_IC;
		arg = strsep(cmd, " ");
		if (!arg) {
			pr_err("No DRC count specified.\n");
			return -EINVAL;
		}

		if (kstrtou32(arg, 0, &count)) {
			pr_err("Invalid DRC count specified.\n");
			return -EINVAL;
		}

		arg = strsep(cmd, " ");
		if (!arg) {
			pr_err("No DRC Index specified.\n");
			return -EINVAL;
		}

		if (kstrtou32(arg, 0, &index)) {
			pr_err("Invalid DRC Index specified.\n");
			return -EINVAL;
		}

		hp_elog->_drc_u.ic.count = cpu_to_be32(count);
		hp_elog->_drc_u.ic.index = cpu_to_be32(index);
	} else if (sysfs_streq(arg, "index")) {
		hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_INDEX;
		arg = strsep(cmd, " ");
		if (!arg) {
			pr_err("No DRC Index specified.\n");
			return -EINVAL;
		}

		if (kstrtou32(arg, 0, &index)) {
			pr_err("Invalid DRC Index specified.\n");
			return -EINVAL;
		}

		hp_elog->_drc_u.drc_index = cpu_to_be32(index);
	} else if (sysfs_streq(arg, "count")) {
		hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_COUNT;
		arg = strsep(cmd, " ");
		if (!arg) {
			pr_err("No DRC count specified.\n");
			return -EINVAL;
		}

		if (kstrtou32(arg, 0, &count)) {
			pr_err("Invalid DRC count specified.\n");
			return -EINVAL;
		}

		hp_elog->_drc_u.drc_count = cpu_to_be32(count);
	} else {
		pr_err("Invalid id_type specified.\n");
		return -EINVAL;
	}

	return 0;
}

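/*
 * sysfs interface (/sys/kernel/dlpar): parses requests of the form
 * "<resource> <action> <id_type> <id>", for example "memory add count 1",
 * and runs them synchronously through handle_dlpar_errorlog().
 */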
static ssize_t dlpar_store(struct class *class, struct class_attribute *attr,
			   const char *buf, size_t count)
{
	struct pseries_hp_errorlog hp_elog;
	char *argbuf;
	char *args;
	int rc;

	args = argbuf = kstrdup(buf, GFP_KERNEL);
	if (!argbuf) {
		pr_info("Could not allocate resources for DLPAR operation\n");
		kfree(argbuf);
		return -ENOMEM;
	}

	/*
	 * Parse out the request from the user, this will be in the form:
	 * <resource> <action> <id_type> <id>
	 */
	rc = dlpar_parse_resource(&args, &hp_elog);
	if (rc)
		goto dlpar_store_out;

	rc = dlpar_parse_action(&args, &hp_elog);
	if (rc)
		goto dlpar_store_out;

	rc = dlpar_parse_id_type(&args, &hp_elog);
	if (rc)
		goto dlpar_store_out;

	rc = handle_dlpar_errorlog(&hp_elog);

dlpar_store_out:
	kfree(argbuf);

	if (rc)
		pr_err("Could not handle DLPAR request \"%s\"\n", buf);

	return rc ? rc : count;
}

static ssize_t dlpar_show(struct class *class, struct class_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%s\n", "memory,cpu");
}

static CLASS_ATTR_RW(dlpar);

int __init dlpar_workqueue_init(void)
{
	if (pseries_hp_wq)
		return 0;

	pseries_hp_wq = alloc_workqueue("pseries hotplug workqueue",
			WQ_UNBOUND, 1);

	return pseries_hp_wq ? 0 : -ENOMEM;
}

static int __init dlpar_sysfs_init(void)
{
	int rc;

	rc = dlpar_workqueue_init();
	if (rc)
		return rc;

	return sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr);
}
machine_device_initcall(pseries, dlpar_sysfs_init);