// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * pseries Memory Hotplug infrastructure.
 *
 * Copyright (C) 2008 Badari Pulavarty, IBM Corporation
 */

#define pr_fmt(fmt)	"pseries-hotplug-mem: " fmt

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/slab.h>

#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/sparsemem.h>
#include <asm/fadump.h>
#include <asm/drmem.h>
#include "pseries.h"

static void dlpar_free_property(struct property *prop)
{
	kfree(prop->name);
	kfree(prop->value);
	kfree(prop);
}

static struct property *dlpar_clone_property(struct property *prop,
					     u32 prop_size)
{
	struct property *new_prop;

	new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
	if (!new_prop)
		return NULL;

	new_prop->name = kstrdup(prop->name, GFP_KERNEL);
	new_prop->value = kzalloc(prop_size, GFP_KERNEL);
	if (!new_prop->name || !new_prop->value) {
		dlpar_free_property(new_prop);
		return NULL;
	}

	memcpy(new_prop->value, prop->value, prop->length);
	new_prop->length = prop_size;

	of_property_set_flag(new_prop, OF_DYNAMIC);
	return new_prop;
}

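/*
 * Find the index of the associativity array that matches this LMB's
 * associativity in the ibm,associativity-lookup-arrays property. If no
 * matching array exists, grow the property by one array and append the
 * LMB's associativity. Returns true and sets *aa_index on success.
 */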
static bool find_aa_index(struct device_node *dr_node,
			 struct property *ala_prop,
			 const u32 *lmb_assoc, u32 *aa_index)
{
	u32 *assoc_arrays, new_prop_size;
	struct property *new_prop;
	int aa_arrays, aa_array_entries, aa_array_sz;
	int i, index;

	/*
	 * The ibm,associativity-lookup-arrays property is defined to be
	 * a 32-bit value specifying the number of associativity arrays
	 * followed by a 32-bit value specifying the number of entries per
	 * array, followed by the associativity arrays.
	 */
	assoc_arrays = ala_prop->value;

	aa_arrays = be32_to_cpu(assoc_arrays[0]);
	aa_array_entries = be32_to_cpu(assoc_arrays[1]);
	aa_array_sz = aa_array_entries * sizeof(u32);

	for (i = 0; i < aa_arrays; i++) {
		index = (i * aa_array_entries) + 2;

		if (memcmp(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz))
			continue;

		*aa_index = i;
		return true;
	}

	new_prop_size = ala_prop->length + aa_array_sz;
	new_prop = dlpar_clone_property(ala_prop, new_prop_size);
	if (!new_prop)
		return false;

	assoc_arrays = new_prop->value;

	/* increment the number of entries in the lookup array */
	assoc_arrays[0] = cpu_to_be32(aa_arrays + 1);

	/* copy the new associativity into the lookup array */
	index = aa_arrays * aa_array_entries + 2;
	memcpy(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz);

	of_update_property(dr_node, new_prop);

	/*
	 * The associativity lookup array index for this lmb is
	 * number of entries - 1 since we added its associativity
	 * to the end of the lookup array.
	 */
	*aa_index = be32_to_cpu(assoc_arrays[0]) - 1;
	return true;
}

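/*
 * Resolve an LMB's associativity by configuring its connector, reading
 * its ibm,associativity property and mapping that to an index in the
 * ibm,associativity-lookup-arrays property under
 * /ibm,dynamic-reconfiguration-memory. On success the index is stored
 * in lmb->aa_index.
 */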
static int update_lmb_associativity_index(struct drmem_lmb *lmb)
{
	struct device_node *parent, *lmb_node, *dr_node;
	struct property *ala_prop;
	const u32 *lmb_assoc;
	u32 aa_index;
	bool found;

	parent = of_find_node_by_path("/");
	if (!parent)
		return -ENODEV;

	lmb_node = dlpar_configure_connector(cpu_to_be32(lmb->drc_index),
					     parent);
	of_node_put(parent);
	if (!lmb_node)
		return -EINVAL;

	lmb_assoc = of_get_property(lmb_node, "ibm,associativity", NULL);
	if (!lmb_assoc) {
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	update_numa_distance(lmb_node);

	dr_node = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!dr_node) {
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	ala_prop = of_find_property(dr_node, "ibm,associativity-lookup-arrays",
				    NULL);
	if (!ala_prop) {
		of_node_put(dr_node);
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	found = find_aa_index(dr_node, ala_prop, lmb_assoc, &aa_index);

	of_node_put(dr_node);
	dlpar_free_cc_nodes(lmb_node);

	if (!found) {
		pr_err("Could not find LMB associativity\n");
		return -1;
	}

	lmb->aa_index = aa_index;
	return 0;
}

static struct memory_block *lmb_to_memblock(struct drmem_lmb *lmb)
{
	unsigned long section_nr;
	struct memory_block *mem_block;

	section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));

	mem_block = find_memory_block(section_nr);
	return mem_block;
}

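/*
 * Locate the LMB with the given DRC index and return, via start_lmb and
 * end_lmb, a range of n_lmbs consecutive LMBs beginning there. Fails if
 * the DRC index is unknown or the range would run past the end of the
 * drmem LMB array.
 */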
static int get_lmb_range(u32 drc_index, int n_lmbs,
			 struct drmem_lmb **start_lmb,
			 struct drmem_lmb **end_lmb)
{
	struct drmem_lmb *lmb, *start, *end;
	struct drmem_lmb *limit;

	start = NULL;
	for_each_drmem_lmb(lmb) {
		if (lmb->drc_index == drc_index) {
			start = lmb;
			break;
		}
	}

	if (!start)
		return -EINVAL;

	end = &start[n_lmbs];

	limit = &drmem_info->lmbs[drmem_info->n_lmbs];
	if (end > limit)
		return -EINVAL;

	*start_lmb = start;
	*end_lmb = end;
	return 0;
}

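/*
 * Online or offline the memory block backing an LMB. Requesting the
 * state the block is already in is treated as success.
 */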
static int dlpar_change_lmb_state(struct drmem_lmb *lmb, bool online)
{
	struct memory_block *mem_block;
	int rc;

	mem_block = lmb_to_memblock(lmb);
	if (!mem_block)
		return -EINVAL;

	if (online && mem_block->dev.offline)
		rc = device_online(&mem_block->dev);
	else if (!online && !mem_block->dev.offline)
		rc = device_offline(&mem_block->dev);
	else
		rc = 0;

	put_device(&mem_block->dev);

	return rc;
}

static int dlpar_online_lmb(struct drmem_lmb *lmb)
{
	return dlpar_change_lmb_state(lmb, true);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static int dlpar_offline_lmb(struct drmem_lmb *lmb)
{
	return dlpar_change_lmb_state(lmb, false);
}

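/*
 * Remove the memory described by an OF memory node, one memory section
 * at a time, and update the memblock reservations to match.
 */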
static int pseries_remove_memblock(unsigned long base, unsigned long memblock_size)
{
	unsigned long start_pfn;
	int sections_per_block;
	int i;

	start_pfn = base >> PAGE_SHIFT;

	lock_device_hotplug();

	if (!pfn_valid(start_pfn))
		goto out;

	sections_per_block = memory_block_size / MIN_MEMORY_BLOCK_SIZE;

	for (i = 0; i < sections_per_block; i++) {
		__remove_memory(base, MIN_MEMORY_BLOCK_SIZE);
		base += MIN_MEMORY_BLOCK_SIZE;
	}

out:
	/* Update memory regions for memory remove */
	memblock_remove(base, memblock_size);
	unlock_device_hotplug();
	return 0;
}

static int pseries_remove_mem_node(struct device_node *np)
{
	int ret;
	struct resource res;

	/*
	 * Check to see if we are actually removing memory
	 */
	if (!of_node_is_type(np, "memory"))
		return 0;

	/*
	 * Find the base address and size of the memblock
	 */
	ret = of_address_to_resource(np, 0, &res);
	if (ret)
		return ret;

	pseries_remove_memblock(res.start, resource_size(&res));
	return 0;
}

static bool lmb_is_removable(struct drmem_lmb *lmb)
{
	if ((lmb->flags & DRCONF_MEM_RESERVED) ||
		!(lmb->flags & DRCONF_MEM_ASSIGNED))
		return false;

#ifdef CONFIG_FA_DUMP
	/*
	 * Don't hot-remove memory that falls in fadump boot memory area
	 * and memory that is reserved for capturing old kernel memory.
	 */
	if (is_fadump_memory_area(lmb->base_addr, memory_block_size_bytes()))
		return false;
#endif
	/* device_offline() will determine if we can actually remove this lmb */
	return true;
}

static int dlpar_add_lmb(struct drmem_lmb *);

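/*
 * Hot-remove a single LMB: offline its memory block, remove the memory,
 * drop it from memblock, invalidate its associativity index and clear
 * DRCONF_MEM_ASSIGNED.
 */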
static int dlpar_remove_lmb(struct drmem_lmb *lmb)
{
	struct memory_block *mem_block;
	int rc;

	if (!lmb_is_removable(lmb))
		return -EINVAL;

	mem_block = lmb_to_memblock(lmb);
	if (mem_block == NULL)
		return -EINVAL;

	rc = dlpar_offline_lmb(lmb);
	if (rc) {
		put_device(&mem_block->dev);
		return rc;
	}

	__remove_memory(lmb->base_addr, memory_block_size);
	put_device(&mem_block->dev);

	/* Update memory regions for memory remove */
	memblock_remove(lmb->base_addr, memory_block_size);

	invalidate_lmb_associativity_index(lmb);
	lmb->flags &= ~DRCONF_MEM_ASSIGNED;

	return 0;
}

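/*
 * Hot-remove the requested number of LMBs, choosing from any removable
 * LMBs. If the full count cannot be removed, the already-removed LMBs
 * are added back and the request fails; otherwise their DRCs are
 * released.
 */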
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
{
	struct drmem_lmb *lmb;
	int lmbs_reserved = 0;
	int lmbs_available = 0;
	int rc;

	pr_info("Attempting to hot-remove %d LMB(s)\n", lmbs_to_remove);

	if (lmbs_to_remove == 0)
		return -EINVAL;

	/* Validate that there are enough LMBs to satisfy the request */
	for_each_drmem_lmb(lmb) {
		if (lmb_is_removable(lmb))
			lmbs_available++;

		if (lmbs_available == lmbs_to_remove)
			break;
	}

	if (lmbs_available < lmbs_to_remove) {
		pr_info("Not enough LMBs available (%d of %d) to satisfy request\n",
			lmbs_available, lmbs_to_remove);
		return -EINVAL;
	}

	for_each_drmem_lmb(lmb) {
		rc = dlpar_remove_lmb(lmb);
		if (rc)
			continue;

		/* Mark this lmb so we can add it later if all of the
		 * requested LMBs cannot be removed.
		 */
		drmem_mark_lmb_reserved(lmb);

		lmbs_reserved++;
		if (lmbs_reserved == lmbs_to_remove)
			break;
	}

	if (lmbs_reserved != lmbs_to_remove) {
		pr_err("Memory hot-remove failed, adding LMBs back\n");

		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_add_lmb(lmb);
			if (rc)
				pr_err("Failed to add LMB back, drc index %x\n",
				       lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);

			lmbs_reserved--;
			if (lmbs_reserved == 0)
				break;
		}

		rc = -EINVAL;
	} else {
		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			dlpar_release_drc(lmb->drc_index);
			pr_info("Memory at %llx was hot-removed\n",
				lmb->base_addr);

			drmem_remove_lmb_reservation(lmb);

			lmbs_reserved--;
			if (lmbs_reserved == 0)
				break;
		}
		rc = 0;
	}

	return rc;
}

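/* Hot-remove the single LMB identified by its DRC index. */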
static int dlpar_memory_remove_by_index(u32 drc_index)
{
	struct drmem_lmb *lmb;
	int lmb_found;
	int rc;

	pr_debug("Attempting to hot-remove LMB, drc index %x\n", drc_index);

	lmb_found = 0;
	for_each_drmem_lmb(lmb) {
		if (lmb->drc_index == drc_index) {
			lmb_found = 1;
			rc = dlpar_remove_lmb(lmb);
			if (!rc)
				dlpar_release_drc(lmb->drc_index);

			break;
		}
	}

	if (!lmb_found) {
		pr_debug("Failed to look up LMB for drc index %x\n", drc_index);
		rc = -EINVAL;
	} else if (rc) {
		pr_debug("Failed to hot-remove memory at %llx\n",
			 lmb->base_addr);
	} else {
		pr_debug("Memory at %llx was hot-removed\n", lmb->base_addr);
	}

	return rc;
}

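/*
 * Hot-remove a contiguous range of LMBs starting at the given DRC
 * index. On failure, any LMBs already removed are added back and their
 * DRCs unisolated as a hint to the hypervisor that the removal failed.
 */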
static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
{
	struct drmem_lmb *lmb, *start_lmb, *end_lmb;
	int rc;

	pr_info("Attempting to hot-remove %u LMB(s) at %x\n",
		lmbs_to_remove, drc_index);

	if (lmbs_to_remove == 0)
		return -EINVAL;

	rc = get_lmb_range(drc_index, lmbs_to_remove, &start_lmb, &end_lmb);
	if (rc)
		return -EINVAL;

	/*
	 * Validate that all LMBs in range are not reserved. Note that it
	 * is ok if they are !ASSIGNED since our goal here is to remove the
	 * LMB range, regardless of whether some LMBs were already removed
	 * for any other reason.
	 *
	 * This is in contrast to what is done in remove_by_count() where we
	 * check for both RESERVED and !ASSIGNED (via lmb_is_removable()),
	 * because we want to remove a fixed amount of LMBs in that function.
	 */
	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (lmb->flags & DRCONF_MEM_RESERVED) {
			pr_err("Memory at %llx (drc index %x) is reserved\n",
				lmb->base_addr, lmb->drc_index);
			return -EINVAL;
		}
	}

	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		/*
		 * dlpar_remove_lmb() will error out if the LMB is already
		 * !ASSIGNED, but this case is a no-op for us.
		 */
		if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
			continue;

		rc = dlpar_remove_lmb(lmb);
		if (rc)
			break;

		drmem_mark_lmb_reserved(lmb);
	}

	if (rc) {
		pr_err("Memory indexed-count-remove failed, adding any removed LMBs\n");

		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			/*
			 * Setting the isolation state of an UNISOLATED/CONFIGURED
			 * device to UNISOLATE is a no-op, but the hypervisor can
			 * use it as a hint that the LMB removal failed.
			 */
			dlpar_unisolate_drc(lmb->drc_index);

			rc = dlpar_add_lmb(lmb);
			if (rc)
				pr_err("Failed to add LMB, drc index %x\n",
				       lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = -EINVAL;
	} else {
		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			dlpar_release_drc(lmb->drc_index);
			pr_info("Memory at %llx (drc index %x) was hot-removed\n",
				lmb->base_addr, lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
	}

	return rc;
}

#else
static inline int pseries_remove_memblock(unsigned long base,
					  unsigned long memblock_size)
{
	return -EOPNOTSUPP;
}
static inline int pseries_remove_mem_node(struct device_node *np)
{
	return 0;
}
static int dlpar_remove_lmb(struct drmem_lmb *lmb)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_index(u32 drc_index)
{
	return -EOPNOTSUPP;
}

static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

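/*
 * Hot-add a single LMB: resolve its associativity, add and online its
 * memory, and mark it DRCONF_MEM_ASSIGNED. The added memory is rolled
 * back if onlining fails.
 */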
static int dlpar_add_lmb(struct drmem_lmb *lmb)
{
	unsigned long block_sz;
	int nid, rc;

	if (lmb->flags & DRCONF_MEM_ASSIGNED)
		return -EINVAL;

	rc = update_lmb_associativity_index(lmb);
	if (rc) {
		dlpar_release_drc(lmb->drc_index);
		return rc;
	}

	block_sz = memory_block_size_bytes();

	/* Find the node id for this LMB.  Fake one if necessary. */
	nid = of_drconf_to_nid_single(lmb);
	if (nid < 0 || !node_possible(nid))
		nid = first_online_node;

	/* Add the memory */
	rc = __add_memory(nid, lmb->base_addr, block_sz, MHP_MEMMAP_ON_MEMORY);
	if (rc) {
		invalidate_lmb_associativity_index(lmb);
		return rc;
	}

	rc = dlpar_online_lmb(lmb);
	if (rc) {
		__remove_memory(lmb->base_addr, block_sz);
		invalidate_lmb_associativity_index(lmb);
	} else {
		lmb->flags |= DRCONF_MEM_ASSIGNED;
	}

	return rc;
}

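/*
 * Hot-add the requested number of LMBs, choosing from any unassigned
 * LMBs. If the full count cannot be added, the already-added LMBs are
 * removed again and the request fails.
 */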
static int dlpar_memory_add_by_count(u32 lmbs_to_add)
{
	struct drmem_lmb *lmb;
	int lmbs_available = 0;
	int lmbs_reserved = 0;
	int rc;

	pr_info("Attempting to hot-add %d LMB(s)\n", lmbs_to_add);

	if (lmbs_to_add == 0)
		return -EINVAL;

	/* Validate that there are enough LMBs to satisfy the request */
	for_each_drmem_lmb(lmb) {
		if (lmb->flags & DRCONF_MEM_RESERVED)
			continue;

		if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
			lmbs_available++;

		if (lmbs_available == lmbs_to_add)
			break;
	}

	if (lmbs_available < lmbs_to_add)
		return -EINVAL;

	for_each_drmem_lmb(lmb) {
		if (lmb->flags & DRCONF_MEM_ASSIGNED)
			continue;

		rc = dlpar_acquire_drc(lmb->drc_index);
		if (rc)
			continue;

		rc = dlpar_add_lmb(lmb);
		if (rc) {
			dlpar_release_drc(lmb->drc_index);
			continue;
		}

		/* Mark this lmb so we can remove it later if all of the
		 * requested LMBs cannot be added.
		 */
		drmem_mark_lmb_reserved(lmb);
		lmbs_reserved++;
		if (lmbs_reserved == lmbs_to_add)
			break;
	}

	if (lmbs_reserved != lmbs_to_add) {
		pr_err("Memory hot-add failed, removing any added LMBs\n");

		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_remove_lmb(lmb);
			if (rc)
				pr_err("Failed to remove LMB, drc index %x\n",
				       lmb->drc_index);
			else
				dlpar_release_drc(lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
			lmbs_reserved--;

			if (lmbs_reserved == 0)
				break;
		}
		rc = -EINVAL;
	} else {
		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			pr_debug("Memory at %llx (drc index %x) was hot-added\n",
				 lmb->base_addr, lmb->drc_index);
			drmem_remove_lmb_reservation(lmb);
			lmbs_reserved--;

			if (lmbs_reserved == 0)
				break;
		}
		rc = 0;
	}

	return rc;
}

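/* Hot-add the single LMB identified by its DRC index. */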
static int dlpar_memory_add_by_index(u32 drc_index)
{
	struct drmem_lmb *lmb;
	int rc, lmb_found;

	pr_info("Attempting to hot-add LMB, drc index %x\n", drc_index);

	lmb_found = 0;
	for_each_drmem_lmb(lmb) {
		if (lmb->drc_index == drc_index) {
			lmb_found = 1;
			rc = dlpar_acquire_drc(lmb->drc_index);
			if (!rc) {
				rc = dlpar_add_lmb(lmb);
				if (rc)
					dlpar_release_drc(lmb->drc_index);
			}

			break;
		}
	}

	if (!lmb_found)
		rc = -EINVAL;

	if (rc)
		pr_info("Failed to hot-add memory, drc index %x\n", drc_index);
	else
		pr_info("Memory at %llx (drc index %x) was hot-added\n",
			lmb->base_addr, drc_index);

	return rc;
}

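/*
 * Hot-add a contiguous range of LMBs starting at the given DRC index.
 * On failure, any LMBs already added are removed again.
 */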
static int dlpar_memory_add_by_ic(u32 lmbs_to_add, u32 drc_index)
{
	struct drmem_lmb *lmb, *start_lmb, *end_lmb;
	int rc;

	pr_info("Attempting to hot-add %u LMB(s) at index %x\n",
		lmbs_to_add, drc_index);

	if (lmbs_to_add == 0)
		return -EINVAL;

	rc = get_lmb_range(drc_index, lmbs_to_add, &start_lmb, &end_lmb);
	if (rc)
		return -EINVAL;

	/* Validate that the LMBs in this range are not reserved */
	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		/* Fail immediately if the whole range can't be hot-added */
		if (lmb->flags & DRCONF_MEM_RESERVED) {
			pr_err("Memory at %llx (drc index %x) is reserved\n",
					lmb->base_addr, lmb->drc_index);
			return -EINVAL;
		}
	}

	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (lmb->flags & DRCONF_MEM_ASSIGNED)
			continue;

		rc = dlpar_acquire_drc(lmb->drc_index);
		if (rc)
			break;

		rc = dlpar_add_lmb(lmb);
		if (rc) {
			dlpar_release_drc(lmb->drc_index);
			break;
		}

		drmem_mark_lmb_reserved(lmb);
	}

	if (rc) {
		pr_err("Memory indexed-count-add failed, removing any added LMBs\n");

		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_remove_lmb(lmb);
			if (rc)
				pr_err("Failed to remove LMB, drc index %x\n",
				       lmb->drc_index);
			else
				dlpar_release_drc(lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = -EINVAL;
	} else {
		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			pr_info("Memory at %llx (drc index %x) was hot-added\n",
				lmb->base_addr, lmb->drc_index);
			drmem_remove_lmb_reservation(lmb);
		}
	}

	return rc;
}

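/*
 * Top-level handler for memory DLPAR requests. Dispatches add and
 * remove actions by LMB count, DRC index or indexed-count, and updates
 * the device tree when the operation succeeds.
 */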
int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
{
	u32 count, drc_index;
	int rc;

	lock_device_hotplug();

	switch (hp_elog->action) {
	case PSERIES_HP_ELOG_ACTION_ADD:
		switch (hp_elog->id_type) {
		case PSERIES_HP_ELOG_ID_DRC_COUNT:
			count = hp_elog->_drc_u.drc_count;
			rc = dlpar_memory_add_by_count(count);
			break;
		case PSERIES_HP_ELOG_ID_DRC_INDEX:
			drc_index = hp_elog->_drc_u.drc_index;
			rc = dlpar_memory_add_by_index(drc_index);
			break;
		case PSERIES_HP_ELOG_ID_DRC_IC:
			count = hp_elog->_drc_u.ic.count;
			drc_index = hp_elog->_drc_u.ic.index;
			rc = dlpar_memory_add_by_ic(count, drc_index);
			break;
		default:
			rc = -EINVAL;
			break;
		}

		break;
	case PSERIES_HP_ELOG_ACTION_REMOVE:
		switch (hp_elog->id_type) {
		case PSERIES_HP_ELOG_ID_DRC_COUNT:
			count = hp_elog->_drc_u.drc_count;
			rc = dlpar_memory_remove_by_count(count);
			break;
		case PSERIES_HP_ELOG_ID_DRC_INDEX:
			drc_index = hp_elog->_drc_u.drc_index;
			rc = dlpar_memory_remove_by_index(drc_index);
			break;
		case PSERIES_HP_ELOG_ID_DRC_IC:
			count = hp_elog->_drc_u.ic.count;
			drc_index = hp_elog->_drc_u.ic.index;
			rc = dlpar_memory_remove_by_ic(count, drc_index);
			break;
		default:
			rc = -EINVAL;
			break;
		}

		break;
	default:
		pr_err("Invalid action (%d) specified\n", hp_elog->action);
		rc = -EINVAL;
		break;
	}

	if (!rc)
		rc = drmem_update_dt();

	unlock_device_hotplug();
	return rc;
}

static int pseries_add_mem_node(struct device_node *np)
{
	int ret;
	struct resource res;

	/*
	 * Check to see if we are actually adding memory
	 */
	if (!of_node_is_type(np, "memory"))
		return 0;

	/*
	 * Find the base and size of the memblock
	 */
	ret = of_address_to_resource(np, 0, &res);
	if (ret)
		return ret;

	/*
	 * Update memory region to represent the memory add
	 */
	ret = memblock_add(res.start, resource_size(&res));
	return (ret < 0) ? -EINVAL : 0;
}

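/*
 * OF reconfiguration notifier: keep memblock and the drmem LMB array in
 * sync as memory nodes are attached, detached or updated.
 */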
static int pseries_memory_notifier(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct of_reconfig_data *rd = data;
	int err = 0;

	switch (action) {
	case OF_RECONFIG_ATTACH_NODE:
		err = pseries_add_mem_node(rd->dn);
		break;
	case OF_RECONFIG_DETACH_NODE:
		err = pseries_remove_mem_node(rd->dn);
		break;
	case OF_RECONFIG_UPDATE_PROPERTY:
		if (!strcmp(rd->dn->name,
			    "ibm,dynamic-reconfiguration-memory"))
			drmem_update_lmbs(rd->prop);
	}
	return notifier_from_errno(err);
}

static struct notifier_block pseries_mem_nb = {
	.notifier_call = pseries_memory_notifier,
};

static int __init pseries_memory_hotplug_init(void)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		of_reconfig_notifier_register(&pseries_mem_nb);

	return 0;
}
machine_device_initcall(pseries, pseries_memory_hotplug_init);