// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016, Semihalf
 *	Author: Tomasz Nowicki <tn@semihalf.com>
 *
 * This file implements early detection/parsing of the I/O mapping
 * reported to the OS through firmware via the I/O Remapping Table (IORT).
 * IORT document number: ARM DEN 0049A
 */

#define pr_fmt(fmt)	"ACPI: IORT: " fmt

#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dma-map-ops.h>

#define IORT_TYPE_MASK(type)	(1 << (type))
#define IORT_MSI_TYPE		(1 << ACPI_IORT_NODE_ITS_GROUP)
#define IORT_IOMMU_TYPE		((1 << ACPI_IORT_NODE_SMMU) |	\
				(1 << ACPI_IORT_NODE_SMMU_V3))

struct iort_its_msi_chip {
	struct list_head	list;
	struct fwnode_handle	*fw_node;
	phys_addr_t		base_addr;
	u32			translation_id;
};

struct iort_fwnode {
	struct list_head list;
	struct acpi_iort_node *iort_node;
	struct fwnode_handle *fwnode;
};
static LIST_HEAD(iort_fwnode_list);
static DEFINE_SPINLOCK(iort_fwnode_lock);

/**
 * iort_set_fwnode() - Create an iort_fwnode and use it to register
 *		       IOMMU data in the iort_fwnode_list
 *
 * @iort_node: IORT table node associated with the IOMMU
 * @fwnode: fwnode associated with the IORT node
 *
 * Returns: 0 on success
 *          <0 on failure
 */
static inline int iort_set_fwnode(struct acpi_iort_node *iort_node,
				  struct fwnode_handle *fwnode)
{
	struct iort_fwnode *np;

	np = kzalloc(sizeof(struct iort_fwnode), GFP_ATOMIC);

	if (WARN_ON(!np))
		return -ENOMEM;

	INIT_LIST_HEAD(&np->list);
	np->iort_node = iort_node;
	np->fwnode = fwnode;

	spin_lock(&iort_fwnode_lock);
	list_add_tail(&np->list, &iort_fwnode_list);
	spin_unlock(&iort_fwnode_lock);

	return 0;
}

/**
 * iort_get_fwnode() - Retrieve fwnode associated with an IORT node
 *
 * @node: IORT table node to be looked-up
 *
 * Returns: fwnode_handle pointer on success, NULL on failure
 */
static inline struct fwnode_handle *iort_get_fwnode(
			struct acpi_iort_node *node)
{
	struct iort_fwnode *curr;
	struct fwnode_handle *fwnode = NULL;

	spin_lock(&iort_fwnode_lock);
	list_for_each_entry(curr, &iort_fwnode_list, list) {
		if (curr->iort_node == node) {
			fwnode = curr->fwnode;
			break;
		}
	}
	spin_unlock(&iort_fwnode_lock);

	return fwnode;
}

/**
 * iort_delete_fwnode() - Delete fwnode associated with an IORT node
 *
 * @node: IORT table node associated with fwnode to delete
 */
static inline void iort_delete_fwnode(struct acpi_iort_node *node)
{
	struct iort_fwnode *curr, *tmp;

	spin_lock(&iort_fwnode_lock);
	list_for_each_entry_safe(curr, tmp, &iort_fwnode_list, list) {
		if (curr->iort_node == node) {
			list_del(&curr->list);
			kfree(curr);
			break;
		}
	}
	spin_unlock(&iort_fwnode_lock);
}

/**
 * iort_get_iort_node() - Retrieve iort_node associated with an fwnode
 *
 * @fwnode: fwnode associated with device to be looked-up
 *
 * Returns: iort_node pointer on success, NULL on failure
 */
static inline struct acpi_iort_node *iort_get_iort_node(
			struct fwnode_handle *fwnode)
{
	struct iort_fwnode *curr;
	struct acpi_iort_node *iort_node = NULL;

	spin_lock(&iort_fwnode_lock);
	list_for_each_entry(curr, &iort_fwnode_list, list) {
		if (curr->fwnode == fwnode) {
			iort_node = curr->iort_node;
			break;
		}
	}
	spin_unlock(&iort_fwnode_lock);

	return iort_node;
}

typedef acpi_status (*iort_find_node_callback)
	(struct acpi_iort_node *node, void *context);

/* Root pointer to the mapped IORT table */
static struct acpi_table_header *iort_table;

static LIST_HEAD(iort_msi_chip_list);
static DEFINE_SPINLOCK(iort_msi_chip_lock);

/**
 * iort_register_domain_token() - register a domain token, along with the
 * related ITS ID and base address, on a list from which it can be
 * retrieved later.
 * @trans_id: ITS ID.
 * @base: ITS base address.
 * @fw_node: Domain token.
 *
 * Returns: 0 on success, -ENOMEM if the list element allocation fails
 */
int iort_register_domain_token(int trans_id, phys_addr_t base,
			       struct fwnode_handle *fw_node)
{
	struct iort_its_msi_chip *its_msi_chip;

	its_msi_chip = kzalloc(sizeof(*its_msi_chip), GFP_KERNEL);
	if (!its_msi_chip)
		return -ENOMEM;

	its_msi_chip->fw_node = fw_node;
	its_msi_chip->translation_id = trans_id;
	its_msi_chip->base_addr = base;

	spin_lock(&iort_msi_chip_lock);
	list_add(&its_msi_chip->list, &iort_msi_chip_list);
	spin_unlock(&iort_msi_chip_lock);

	return 0;
}

/**
 * iort_deregister_domain_token() - Deregister domain token based on ITS ID
 * @trans_id: ITS ID.
 *
 * Returns: none.
 */
void iort_deregister_domain_token(int trans_id)
{
	struct iort_its_msi_chip *its_msi_chip, *t;

	spin_lock(&iort_msi_chip_lock);
	list_for_each_entry_safe(its_msi_chip, t, &iort_msi_chip_list, list) {
		if (its_msi_chip->translation_id == trans_id) {
			list_del(&its_msi_chip->list);
			kfree(its_msi_chip);
			break;
		}
	}
	spin_unlock(&iort_msi_chip_lock);
}

/**
 * iort_find_domain_token() - Find domain token based on given ITS ID
 * @trans_id: ITS ID.
 *
 * Returns: domain token if found on the list, NULL otherwise
 */
struct fwnode_handle *iort_find_domain_token(int trans_id)
{
	struct fwnode_handle *fw_node = NULL;
	struct iort_its_msi_chip *its_msi_chip;

	spin_lock(&iort_msi_chip_lock);
	list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
		if (its_msi_chip->translation_id == trans_id) {
			fw_node = its_msi_chip->fw_node;
			break;
		}
	}
	spin_unlock(&iort_msi_chip_lock);

	return fw_node;
}
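
/*
 * Example (a hedged sketch, not taken from any real driver): the intended
 * lifecycle of a domain token as seen from an ITS driver. The ITS ID and
 * base address below are illustrative placeholders.
 *
 *	static int its_token_example(struct fwnode_handle *fw_node)
 *	{
 *		int err;
 *
 *		// Publish the token so iort_find_domain_token() can see it.
 *		err = iort_register_domain_token(0, 0x100100000ULL, fw_node);
 *		if (err)
 *			return err;
 *
 *		// MSI layer code later retrieves the token by ITS ID...
 *		WARN_ON(iort_find_domain_token(0) != fw_node);
 *
 *		// ...and on teardown the token is removed again.
 *		iort_deregister_domain_token(0);
 *		return 0;
 *	}
 */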

static struct acpi_iort_node *iort_scan_node(enum acpi_iort_node_type type,
					     iort_find_node_callback callback,
					     void *context)
{
	struct acpi_iort_node *iort_node, *iort_end;
	struct acpi_table_iort *iort;
	int i;

	if (!iort_table)
		return NULL;

	/* Get the first IORT node */
	iort = (struct acpi_table_iort *)iort_table;
	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				 iort->node_offset);
	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				iort_table->length);

	for (i = 0; i < iort->node_count; i++) {
		if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
			       "IORT node pointer overflows, bad table!\n"))
			return NULL;

		if (iort_node->type == type &&
		    ACPI_SUCCESS(callback(iort_node, context)))
			return iort_node;

		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
					 iort_node->length);
	}

	return NULL;
}
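
/*
 * Example (a hedged sketch): a minimal iort_scan_node() callback. The
 * callback returns AE_OK to stop the scan at a matching node and
 * AE_NOT_FOUND to keep scanning; the context pointer is caller-defined,
 * here a hypothetical PCI segment number.
 *
 *	static acpi_status match_segment_example(struct acpi_iort_node *node,
 *						 void *context)
 *	{
 *		struct acpi_iort_root_complex *rc =
 *			(struct acpi_iort_root_complex *)node->node_data;
 *		u32 *segment = context;
 *
 *		return rc->pci_segment_number == *segment ?
 *						AE_OK : AE_NOT_FOUND;
 *	}
 *
 * which would be invoked as:
 *
 *	node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
 *			      match_segment_example, &segment);
 */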

static acpi_status iort_match_node_callback(struct acpi_iort_node *node,
					    void *context)
{
	struct device *dev = context;
	acpi_status status = AE_NOT_FOUND;

	if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) {
		struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_device *adev;
		struct acpi_iort_named_component *ncomp;
		struct device *nc_dev = dev;

		/*
		 * Walk the device tree to find a device with an
		 * ACPI companion; there is no point in scanning
		 * IORT for a device matching a named component if
		 * the device does not have an ACPI companion to
		 * start with.
		 */
		do {
			adev = ACPI_COMPANION(nc_dev);
			if (adev)
				break;

			nc_dev = nc_dev->parent;
		} while (nc_dev);

		if (!adev)
			goto out;

		status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf);
		if (ACPI_FAILURE(status)) {
			dev_warn(nc_dev, "Can't get device full path name\n");
			goto out;
		}

		ncomp = (struct acpi_iort_named_component *)node->node_data;
		status = !strcmp(ncomp->device_name, buf.pointer) ?
							AE_OK : AE_NOT_FOUND;
		acpi_os_free(buf.pointer);
	} else if (node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
		struct acpi_iort_root_complex *pci_rc;
		struct pci_bus *bus;

		bus = to_pci_bus(dev);
		pci_rc = (struct acpi_iort_root_complex *)node->node_data;

		/*
		 * It is assumed that PCI segment numbers map one-to-one
		 * with root complexes. Each segment number can represent
		 * only one root complex.
		 */
		status = pci_rc->pci_segment_number == pci_domain_nr(bus) ?
							AE_OK : AE_NOT_FOUND;
	}
out:
	return status;
}

static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
		       u32 *rid_out, bool check_overlap)
{
	/* A single mapping does not care about the input ID */
	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
		if (type == ACPI_IORT_NODE_NAMED_COMPONENT ||
		    type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
			*rid_out = map->output_base;
			return 0;
		}

		pr_warn(FW_BUG "[map %p] SINGLE MAPPING flag not allowed for node type %d, skipping ID map\n",
			map, type);
		return -ENXIO;
	}

	if (rid_in < map->input_base ||
	    (rid_in > map->input_base + map->id_count))
		return -ENXIO;

	if (check_overlap) {
		/*
		 * We already found a mapping for this input ID at the end of
		 * another region. If it coincides with the start of this
		 * region, we assume the prior match was due to the off-by-1
		 * issue mentioned below, and allow it to be superseded.
		 * Otherwise, things are *really* broken, and we just disregard
		 * duplicate matches entirely to retain compatibility.
		 */
		pr_err(FW_BUG "[map %p] conflicting mapping for input ID 0x%x\n",
		       map, rid_in);
		if (rid_in != map->input_base)
			return -ENXIO;

		pr_err(FW_BUG "applying workaround.\n");
	}

	*rid_out = map->output_base + (rid_in - map->input_base);

	/*
	 * Due to confusion regarding the meaning of the id_count field (which
	 * carries the number of IDs *minus 1*), we may have to disregard this
	 * match if it is at the end of the range, and overlaps with the start
	 * of another one.
	 */
	if (map->id_count > 0 && rid_in == map->input_base + map->id_count)
		return -EAGAIN;
	return 0;
}
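
/*
 * Worked example of the translation arithmetic above, with made-up
 * mapping values: input_base = 0x100, id_count = 0xff (i.e. 0x100 IDs,
 * since id_count carries the number of IDs minus 1), output_base = 0x2000.
 *
 *	rid_in  = 0x142
 *	rid_out = 0x2000 + (0x142 - 0x100) = 0x2042
 *
 * An input of 0x1ff (input_base + id_count) is the end-of-range case:
 * it translates successfully, but iort_id_map() returns -EAGAIN so that
 * an overlapping region starting at 0x1ff may supersede the match.
 */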

static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
					       u32 *id_out, int index)
{
	struct acpi_iort_node *parent;
	struct acpi_iort_id_mapping *map;

	if (!node->mapping_offset || !node->mapping_count ||
				     index >= node->mapping_count)
		return NULL;

	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
			   node->mapping_offset + index * sizeof(*map));

	/* Firmware bug! */
	if (!map->output_reference) {
		pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
		       node, node->type);
		return NULL;
	}

	parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
			       map->output_reference);

	if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
		if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
		    node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX ||
		    node->type == ACPI_IORT_NODE_SMMU_V3 ||
		    node->type == ACPI_IORT_NODE_PMCG) {
			*id_out = map->output_base;
			return parent;
		}
	}

	return NULL;
}

static int iort_get_id_mapping_index(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	struct acpi_iort_pmcg *pmcg;

	switch (node->type) {
	case ACPI_IORT_NODE_SMMU_V3:
		/*
		 * The SMMUv3 dev ID mapping index was introduced in the
		 * revision 1 table; it is not available in revision 0.
		 */
		if (node->revision < 1)
			return -EINVAL;

		smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
		/*
		 * The ID mapping index is only ignored if all interrupts
		 * are GSIV based.
		 */
		if (smmu->event_gsiv && smmu->pri_gsiv && smmu->gerr_gsiv
		    && smmu->sync_gsiv)
			return -EINVAL;

		if (smmu->id_mapping_index >= node->mapping_count) {
			pr_err(FW_BUG "[node %p type %d] ID mapping index overflows valid mappings\n",
			       node, node->type);
			return -EINVAL;
		}

		return smmu->id_mapping_index;
	case ACPI_IORT_NODE_PMCG:
		pmcg = (struct acpi_iort_pmcg *)node->node_data;
		if (pmcg->overflow_gsiv || node->mapping_count == 0)
			return -EINVAL;

		return 0;
	default:
		return -EINVAL;
	}
}

static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node,
					       u32 id_in, u32 *id_out,
					       u8 type_mask)
{
	u32 id = id_in;

	/* Parse the ID mapping tree to find the specified node type */
	while (node) {
		struct acpi_iort_id_mapping *map;
		int i, index, rc = 0;
		u32 out_ref = 0, map_id = id;

		if (IORT_TYPE_MASK(node->type) & type_mask) {
			if (id_out)
				*id_out = id;
			return node;
		}

		if (!node->mapping_offset || !node->mapping_count)
			goto fail_map;

		map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
				   node->mapping_offset);

		/* Firmware bug! */
		if (!map->output_reference) {
			pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
			       node, node->type);
			goto fail_map;
		}

		/*
		 * Get the special ID mapping index (if any) and skip its
		 * associated ID map to prevent erroneous multi-stage
		 * IORT ID translations.
		 */
		index = iort_get_id_mapping_index(node);

		/* Do the ID translation */
		for (i = 0; i < node->mapping_count; i++, map++) {
			/* if it is the special mapping index, skip it */
			if (i == index)
				continue;

			rc = iort_id_map(map, node->type, map_id, &id, out_ref);
			if (!rc)
				break;
			if (rc == -EAGAIN)
				out_ref = map->output_reference;
		}

		if (i == node->mapping_count && !out_ref)
			goto fail_map;

		node = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				    rc ? out_ref : map->output_reference);
	}

fail_map:
	/* Map input ID to output ID unchanged on mapping failure */
	if (id_out)
		*id_out = id_in;

	return NULL;
}
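
/*
 * Example (a hedged sketch): iort_node_map_id() walks the mapping tree
 * one node at a time, re-translating the ID at each step. For a
 * hypothetical RC -> SMMU -> ITS group topology:
 *
 *	its = iort_node_map_id(rc_node, rid, &dev_id, IORT_MSI_TYPE);
 *
 * first translates the requester ID into an SMMU StreamID via the root
 * complex mappings, then translates that StreamID into an ITS DeviceID
 * via the SMMU mappings, and returns the ITS group node with dev_id set.
 */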

static struct acpi_iort_node *iort_node_map_platform_id(
		struct acpi_iort_node *node, u32 *id_out, u8 type_mask,
		int index)
{
	struct acpi_iort_node *parent;
	u32 id;

	/* step 1: retrieve the initial dev id */
	parent = iort_node_get_id(node, &id, index);
	if (!parent)
		return NULL;

	/*
	 * Optional step 2: if the parent is not the target type we want,
	 * map the initial dev id again, to handle use cases such as
	 * NC (named component) -> SMMU -> ITS. If the type matches,
	 * return the initial dev id and its parent pointer directly.
	 */
	if (!(IORT_TYPE_MASK(parent->type) & type_mask))
		parent = iort_node_map_id(parent, id, id_out, type_mask);
	else
		if (id_out)
			*id_out = id;

	return parent;
}

static struct acpi_iort_node *iort_find_dev_node(struct device *dev)
{
	struct pci_bus *pbus;

	if (!dev_is_pci(dev)) {
		struct acpi_iort_node *node;
		/*
		 * Scan iort_fwnode_list to see if the device is an IORT
		 * platform device (such as an SMMU or PMCG); its IORT node
		 * was already cached and associated with the fwnode when
		 * the IORT platform devices were initialized.
		 */
		node = iort_get_iort_node(dev->fwnode);
		if (node)
			return node;
		/*
		 * If not, it should be a platform device defined in the
		 * DSDT/SSDT (with a Named Component node in the IORT).
		 */
		return iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
				      iort_match_node_callback, dev);
	}

	pbus = to_pci_dev(dev)->bus;

	return iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
			      iort_match_node_callback, &pbus->dev);
}

/**
 * iort_msi_map_id() - Map an MSI input ID for a device
 * @dev: The device for which the mapping is to be done.
 * @input_id: The device input ID.
 *
 * Returns: mapped MSI ID on success, input ID otherwise
 */
u32 iort_msi_map_id(struct device *dev, u32 input_id)
{
	struct acpi_iort_node *node;
	u32 dev_id;

	node = iort_find_dev_node(dev);
	if (!node)
		return input_id;

	iort_node_map_id(node, input_id, &dev_id, IORT_MSI_TYPE);
	return dev_id;
}
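
/*
 * Example (a hedged sketch of a caller, not code from the MSI layer):
 *
 *	u32 rid = pci_dev_id(pdev);
 *	u32 dev_id = iort_msi_map_id(&pdev->dev, rid);
 *
 * If the IORT provides no mapping, dev_id == rid; otherwise dev_id is
 * the ITS DeviceID derived from the table.
 */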

/**
 * iort_pmsi_get_dev_id() - Get the device ID for a device
 * @dev: The device for which the mapping is to be done.
 * @dev_id: The device ID found.
 *
 * Returns: 0 if a device ID was found, -ENODEV otherwise
 */
int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id)
{
	int i, index;
	struct acpi_iort_node *node;

	node = iort_find_dev_node(dev);
	if (!node)
		return -ENODEV;

	index = iort_get_id_mapping_index(node);
	/* if there is a valid index, go get the dev_id directly */
	if (index >= 0) {
		if (iort_node_get_id(node, dev_id, index))
			return 0;
	} else {
		for (i = 0; i < node->mapping_count; i++) {
			if (iort_node_map_platform_id(node, dev_id,
						      IORT_MSI_TYPE, i))
				return 0;
		}
	}

	return -ENODEV;
}

static int __maybe_unused iort_find_its_base(u32 its_id, phys_addr_t *base)
{
	struct iort_its_msi_chip *its_msi_chip;
	int ret = -ENODEV;

	spin_lock(&iort_msi_chip_lock);
	list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
		if (its_msi_chip->translation_id == its_id) {
			*base = its_msi_chip->base_addr;
			ret = 0;
			break;
		}
	}
	spin_unlock(&iort_msi_chip_lock);

	return ret;
}

/**
 * iort_dev_find_its_id() - Find the ITS identifier for a device
 * @dev: The device.
 * @id: Device's ID
 * @idx: Index of the ITS identifier list.
 * @its_id: ITS identifier.
 *
 * Returns: 0 on success, appropriate error value otherwise
 */
static int iort_dev_find_its_id(struct device *dev, u32 id,
				unsigned int idx, int *its_id)
{
	struct acpi_iort_its_group *its;
	struct acpi_iort_node *node;

	node = iort_find_dev_node(dev);
	if (!node)
		return -ENXIO;

	node = iort_node_map_id(node, id, NULL, IORT_MSI_TYPE);
	if (!node)
		return -ENXIO;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)node->node_data;
	if (idx >= its->its_count) {
		dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n",
			idx, its->its_count);
		return -ENXIO;
	}

	*its_id = its->identifiers[idx];
	return 0;
}

/**
 * iort_get_device_domain() - Find MSI domain related to a device
 * @dev: The device.
 * @id: Requester ID for the device.
 * @bus_token: irq domain bus token.
 *
 * Returns: the MSI domain for this device, NULL otherwise
 */
struct irq_domain *iort_get_device_domain(struct device *dev, u32 id,
					  enum irq_domain_bus_token bus_token)
{
	struct fwnode_handle *handle;
	int its_id;

	if (iort_dev_find_its_id(dev, id, 0, &its_id))
		return NULL;

	handle = iort_find_domain_token(its_id);
	if (!handle)
		return NULL;

	return irq_find_matching_fwnode(handle, bus_token);
}
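
/*
 * Example (a hedged sketch): typical use from MSI setup code, reusing
 * the dev_id obtained above:
 *
 *	struct irq_domain *d;
 *
 *	d = iort_get_device_domain(dev, dev_id, DOMAIN_BUS_PCI_MSI);
 *	if (d)
 *		dev_set_msi_domain(dev, d);
 *
 * A NULL return means no ITS could be resolved for this device, and the
 * caller should fall back to its default MSI domain, if any.
 */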

static void iort_set_device_domain(struct device *dev,
				   struct acpi_iort_node *node)
{
	struct acpi_iort_its_group *its;
	struct acpi_iort_node *msi_parent;
	struct acpi_iort_id_mapping *map;
	struct fwnode_handle *iort_fwnode;
	struct irq_domain *domain;
	int index;

	index = iort_get_id_mapping_index(node);
	if (index < 0)
		return;

	map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
			   node->mapping_offset + index * sizeof(*map));

	/* Firmware bug! */
	if (!map->output_reference ||
	    !(map->flags & ACPI_IORT_ID_SINGLE_MAPPING)) {
		pr_err(FW_BUG "[node %p type %d] Invalid MSI mapping\n",
		       node, node->type);
		return;
	}

	msi_parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
				  map->output_reference);

	if (!msi_parent || msi_parent->type != ACPI_IORT_NODE_ITS_GROUP)
		return;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)msi_parent->node_data;

	iort_fwnode = iort_find_domain_token(its->identifiers[0]);
	if (!iort_fwnode)
		return;

	domain = irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
	if (domain)
		dev_set_msi_domain(dev, domain);
}

/**
 * iort_get_platform_device_domain() - Find MSI domain related to a
 * platform device
 * @dev: the dev pointer associated with the platform device
 *
 * Returns: the MSI domain for this device, NULL otherwise
 */
static struct irq_domain *iort_get_platform_device_domain(struct device *dev)
{
	struct acpi_iort_node *node, *msi_parent = NULL;
	struct fwnode_handle *iort_fwnode;
	struct acpi_iort_its_group *its;
	int i;

	/* find its associated iort node */
	node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
			      iort_match_node_callback, dev);
	if (!node)
		return NULL;

	/* then find its msi parent node */
	for (i = 0; i < node->mapping_count; i++) {
		msi_parent = iort_node_map_platform_id(node, NULL,
						       IORT_MSI_TYPE, i);
		if (msi_parent)
			break;
	}

	if (!msi_parent)
		return NULL;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)msi_parent->node_data;

	iort_fwnode = iort_find_domain_token(its->identifiers[0]);
	if (!iort_fwnode)
		return NULL;

	return irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
}

void acpi_configure_pmsi_domain(struct device *dev)
{
	struct irq_domain *msi_domain;

	msi_domain = iort_get_platform_device_domain(dev);
	if (msi_domain)
		dev_set_msi_domain(dev, msi_domain);
}

#ifdef CONFIG_IOMMU_API
static struct acpi_iort_node *iort_get_msi_resv_iommu(struct device *dev)
{
	struct acpi_iort_node *iommu;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	iommu = iort_get_iort_node(fwspec->iommu_fwnode);

	if (iommu && (iommu->type == ACPI_IORT_NODE_SMMU_V3)) {
		struct acpi_iort_smmu_v3 *smmu;

		smmu = (struct acpi_iort_smmu_v3 *)iommu->node_data;
		if (smmu->model == ACPI_IORT_SMMU_V3_HISILICON_HI161X)
			return iommu;
	}

	return NULL;
}

static inline const struct iommu_ops *iort_fwspec_iommu_ops(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	return (fwspec && fwspec->ops) ? fwspec->ops : NULL;
}

static inline int iort_add_device_replay(struct device *dev)
{
	int err = 0;

	if (dev->bus && !device_iommu_mapped(dev))
		err = iommu_probe_device(dev);

	return err;
}

/**
 * iort_iommu_msi_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @head: Reserved region list from iommu_get_resv_regions()
 *
 * Returns: Number of MSI reserved regions on success (0 if the platform
 *          doesn't require the reservation or there are no associated
 *          MSI regions), appropriate error value otherwise. The ITS
 *          interrupt translation spaces (ITS_base + SZ_64K, SZ_64K)
 *          associated with the device are the MSI reserved regions.
 */
int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct acpi_iort_its_group *its;
	struct acpi_iort_node *iommu_node, *its_node = NULL;
	int i, resv = 0;

	iommu_node = iort_get_msi_resv_iommu(dev);
	if (!iommu_node)
		return 0;

	/*
	 * The current logic to reserve ITS regions relies on HW topologies
	 * where a given PCI or named component maps its IDs to only one
	 * ITS group; if a PCI or named component can map its IDs to
	 * different ITS groups through IORT mappings this function has
	 * to be reworked to ensure we reserve regions for all ITS groups
	 * a given PCI or named component may map IDs to.
	 */

	for (i = 0; i < fwspec->num_ids; i++) {
		its_node = iort_node_map_id(iommu_node,
					fwspec->ids[i],
					NULL, IORT_MSI_TYPE);
		if (its_node)
			break;
	}

	if (!its_node)
		return 0;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)its_node->node_data;

	for (i = 0; i < its->its_count; i++) {
		phys_addr_t base;

		if (!iort_find_its_base(its->identifiers[i], &base)) {
			int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
			struct iommu_resv_region *region;

			region = iommu_alloc_resv_region(base + SZ_64K, SZ_64K,
							 prot, IOMMU_RESV_MSI);
			if (region) {
				list_add_tail(&region->list, head);
				resv++;
			}
		}
	}

	return (resv == its->its_count) ? resv : -ENODEV;
}
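
/*
 * Example (a hedged sketch): IOMMU code is expected to call the helper
 * above when building a device's reserved region list, e.g.:
 *
 *	if (iort_iommu_msi_get_resv_regions(dev, head) < 0)
 *		dev_warn(dev, "MSI reserved region lookup failed\n");
 *
 * Each region returned spans the 64K ITS translation space at
 * ITS_base + 64K, letting the IOMMU layer special-case the MSI
 * doorbell addresses instead of translating them like ordinary DMA.
 */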

static inline bool iort_iommu_driver_enabled(u8 type)
{
	switch (type) {
	case ACPI_IORT_NODE_SMMU_V3:
		return IS_ENABLED(CONFIG_ARM_SMMU_V3);
	case ACPI_IORT_NODE_SMMU:
		return IS_ENABLED(CONFIG_ARM_SMMU);
	default:
		pr_warn("IORT node type %u does not describe an SMMU\n", type);
		return false;
	}
}

static int arm_smmu_iort_xlate(struct device *dev, u32 streamid,
			       struct fwnode_handle *fwnode,
			       const struct iommu_ops *ops)
{
	int ret = iommu_fwspec_init(dev, fwnode, ops);

	if (!ret)
		ret = iommu_fwspec_add_ids(dev, &streamid, 1);

	return ret;
}

static bool iort_pci_rc_supports_ats(struct acpi_iort_node *node)
{
	struct acpi_iort_root_complex *pci_rc;

	pci_rc = (struct acpi_iort_root_complex *)node->node_data;
	return pci_rc->ats_attribute & ACPI_IORT_ATS_SUPPORTED;
}

static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node,
			    u32 streamid)
{
	const struct iommu_ops *ops;
	struct fwnode_handle *iort_fwnode;

	if (!node)
		return -ENODEV;

	iort_fwnode = iort_get_fwnode(node);
	if (!iort_fwnode)
		return -ENODEV;

	/*
	 * If the ops look-up fails, this means that either
	 * the SMMU drivers have not been probed yet or that
	 * the SMMU drivers are not built into the kernel.
	 * Depending on whether the SMMU drivers are built-in,
	 * defer the IOMMU configuration or just abort it.
	 */
	ops = iommu_ops_from_fwnode(iort_fwnode);
	if (!ops)
		return iort_iommu_driver_enabled(node->type) ?
		       -EPROBE_DEFER : -ENODEV;

	return arm_smmu_iort_xlate(dev, streamid, iort_fwnode, ops);
}

struct iort_pci_alias_info {
	struct device *dev;
	struct acpi_iort_node *node;
};

static int iort_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
{
	struct iort_pci_alias_info *info = data;
	struct acpi_iort_node *parent;
	u32 streamid;

	parent = iort_node_map_id(info->node, alias, &streamid,
				  IORT_IOMMU_TYPE);
	return iort_iommu_xlate(info->dev, parent, streamid);
}

static void iort_named_component_init(struct device *dev,
				      struct acpi_iort_node *node)
{
	struct acpi_iort_named_component *nc;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (!fwspec)
		return;

	nc = (struct acpi_iort_named_component *)node->node_data;
	fwspec->num_pasid_bits = FIELD_GET(ACPI_IORT_NC_PASID_BITS,
					   nc->node_flags);
}

static int iort_nc_iommu_map(struct device *dev, struct acpi_iort_node *node)
{
	struct acpi_iort_node *parent;
	int err = -ENODEV, i = 0;
	u32 streamid = 0;

	do {
		parent = iort_node_map_platform_id(node, &streamid,
						   IORT_IOMMU_TYPE,
						   i++);

		if (parent)
			err = iort_iommu_xlate(dev, parent, streamid);
	} while (parent && !err);

	return err;
}

static int iort_nc_iommu_map_id(struct device *dev,
				struct acpi_iort_node *node,
				const u32 *in_id)
{
	struct acpi_iort_node *parent;
	u32 streamid;

	parent = iort_node_map_id(node, *in_id, &streamid, IORT_IOMMU_TYPE);
	if (parent)
		return iort_iommu_xlate(dev, parent, streamid);

	return -ENODEV;
}

/**
 * iort_iommu_configure_id - Set-up IOMMU configuration for a device.
 *
 * @dev: device to configure
 * @id_in: optional input id const value pointer
 *
 * Returns: iommu_ops pointer on configuration success
 *          NULL on configuration failure
 */
const struct iommu_ops *iort_iommu_configure_id(struct device *dev,
						const u32 *id_in)
{
	struct acpi_iort_node *node;
	const struct iommu_ops *ops;
	int err = -ENODEV;

	/*
	 * If we already translated the fwspec there
	 * is nothing left to do, return the iommu_ops.
	 */
	ops = iort_fwspec_iommu_ops(dev);
	if (ops)
		return ops;

	if (dev_is_pci(dev)) {
		struct iommu_fwspec *fwspec;
		struct pci_bus *bus = to_pci_dev(dev)->bus;
		struct iort_pci_alias_info info = { .dev = dev };

		node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
				      iort_match_node_callback, &bus->dev);
		if (!node)
			return NULL;

		info.node = node;
		err = pci_for_each_dma_alias(to_pci_dev(dev),
					     iort_pci_iommu_init, &info);

		fwspec = dev_iommu_fwspec_get(dev);
		if (fwspec && iort_pci_rc_supports_ats(node))
			fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
	} else {
		node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
				      iort_match_node_callback, dev);
		if (!node)
			return NULL;

		err = id_in ? iort_nc_iommu_map_id(dev, node, id_in) :
			      iort_nc_iommu_map(dev, node);

		if (!err)
			iort_named_component_init(dev, node);
	}

	/*
	 * If we have reason to believe the IOMMU driver missed the initial
	 * add_device callback for dev, replay it to get things in order.
	 */
	if (!err) {
		ops = iort_fwspec_iommu_ops(dev);
		err = iort_add_device_replay(dev);
	}

	/* Ignore all other errors apart from EPROBE_DEFER */
	if (err == -EPROBE_DEFER) {
		ops = ERR_PTR(err);
	} else if (err) {
		dev_dbg(dev, "Adding to IOMMU failed: %d\n", err);
		ops = NULL;
	}

	return ops;
}

#else
int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
{ return 0; }
const struct iommu_ops *iort_iommu_configure_id(struct device *dev,
						const u32 *input_id)
{ return NULL; }
#endif

static int nc_dma_get_range(struct device *dev, u64 *size)
{
	struct acpi_iort_node *node;
	struct acpi_iort_named_component *ncomp;

	node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
			      iort_match_node_callback, dev);
	if (!node)
		return -ENODEV;

	ncomp = (struct acpi_iort_named_component *)node->node_data;

	if (!ncomp->memory_address_limit) {
		pr_warn(FW_BUG "Named component missing memory address limit\n");
		return -EINVAL;
	}

	*size = ncomp->memory_address_limit >= 64 ? U64_MAX :
			1ULL << ncomp->memory_address_limit;

	return 0;
}
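
/*
 * Worked example of the size computation above: a named component with
 * memory_address_limit = 32 can address 2^32 bytes, so *size becomes
 * 1ULL << 32 = 0x100000000. A limit of 64 or more would make the shift
 * undefined, hence the explicit U64_MAX clamp.
 */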

static int rc_dma_get_range(struct device *dev, u64 *size)
{
	struct acpi_iort_node *node;
	struct acpi_iort_root_complex *rc;
	struct pci_bus *pbus = to_pci_dev(dev)->bus;

	node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
			      iort_match_node_callback, &pbus->dev);
	if (!node || node->revision < 1)
		return -ENODEV;

	rc = (struct acpi_iort_root_complex *)node->node_data;

	if (!rc->memory_address_limit) {
		pr_warn(FW_BUG "Root complex missing memory address limit\n");
		return -EINVAL;
	}

	*size = rc->memory_address_limit >= 64 ? U64_MAX :
			1ULL << rc->memory_address_limit;

	return 0;
}

/**
 * iort_dma_get_ranges() - Look up the DMA addressing limit for the device
 * @dev: device to lookup
 * @size: DMA range size result pointer
 *
 * Return: 0 on success, an error otherwise.
 */
int iort_dma_get_ranges(struct device *dev, u64 *size)
{
	if (dev_is_pci(dev))
		return rc_dma_get_range(dev, size);
	else
		return nc_dma_get_range(dev, size);
}

static void __init acpi_iort_register_irq(int hwirq, const char *name,
					  int trigger,
					  struct resource *res)
{
	int irq = acpi_register_gsi(NULL, hwirq, trigger,
				    ACPI_ACTIVE_HIGH);

	if (irq <= 0) {
		pr_err("could not register gsi hwirq %d name [%s]\n", hwirq,
								      name);
		return;
	}

	res->start = irq;
	res->end = irq;
	res->flags = IORESOURCE_IRQ;
	res->name = name;
}

static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	/* Always present mem resource */
	int num_res = 1;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	if (smmu->event_gsiv)
		num_res++;

	if (smmu->pri_gsiv)
		num_res++;

	if (smmu->gerr_gsiv)
		num_res++;

	if (smmu->sync_gsiv)
		num_res++;

	return num_res;
}

static bool arm_smmu_v3_is_combined_irq(struct acpi_iort_smmu_v3 *smmu)
{
	/*
	 * The Cavium ThunderX2 implementation doesn't support unique IRQ
	 * lines. Use a single IRQ line for all the SMMUv3 interrupts.
	 */
	if (smmu->model != ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
		return false;

	/*
	 * ThunderX2 doesn't support MSIs from the SMMU, so we're checking
	 * SPI numbers here.
	 */
	return smmu->event_gsiv == smmu->pri_gsiv &&
	       smmu->event_gsiv == smmu->gerr_gsiv &&
	       smmu->event_gsiv == smmu->sync_gsiv;
}

static unsigned long arm_smmu_v3_resource_size(struct acpi_iort_smmu_v3 *smmu)
{
	/*
	 * Override the size for the Cavium ThunderX2 implementation,
	 * which doesn't support the page 1 SMMU register space.
	 */
	if (smmu->model == ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
		return SZ_64K;

	return SZ_128K;
}
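
/*
 * Illustrative resource layout produced by the init function below for
 * a typical (non-ThunderX2) SMMUv3 node with all four wired interrupts;
 * the base address is a made-up example:
 *
 *	res[0]: mem [0x2b400000 - 0x2b41ffff]	(128K, register pages 0 and 1)
 *	res[1]: irq "eventq"
 *	res[2]: irq "priq"
 *	res[3]: irq "gerror"
 *	res[4]: irq "cmdq-sync"
 */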

static void __init arm_smmu_v3_init_resources(struct resource *res,
					      struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	int num_res = 0;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	res[num_res].start = smmu->base_address;
	res[num_res].end = smmu->base_address +
				arm_smmu_v3_resource_size(smmu) - 1;
	res[num_res].flags = IORESOURCE_MEM;

	num_res++;
	if (arm_smmu_v3_is_combined_irq(smmu)) {
		if (smmu->event_gsiv)
			acpi_iort_register_irq(smmu->event_gsiv, "combined",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);
	} else {
		if (smmu->event_gsiv)
			acpi_iort_register_irq(smmu->event_gsiv, "eventq",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);

		if (smmu->pri_gsiv)
			acpi_iort_register_irq(smmu->pri_gsiv, "priq",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);

		if (smmu->gerr_gsiv)
			acpi_iort_register_irq(smmu->gerr_gsiv, "gerror",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);

		if (smmu->sync_gsiv)
			acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);
	}
}

static void __init arm_smmu_v3_dma_configure(struct device *dev,
					     struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	enum dev_dma_attr attr;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	attr = (smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE) ?
			DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;

	/* We expect the dma masks to be equivalent for all SMMUv3 set-ups */
	dev->dma_mask = &dev->coherent_dma_mask;

	/* Configure DMA for the page table walker */
	acpi_dma_configure(dev, attr);
}

#if defined(CONFIG_ACPI_NUMA)
/*
 * Set the NUMA proximity domain for the SMMUv3 device.
 */
static int  __init arm_smmu_v3_set_proximity(struct device *dev,
					      struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;

	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
	if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
		int dev_node = pxm_to_node(smmu->pxm);

		if (dev_node != NUMA_NO_NODE && !node_online(dev_node))
			return -EINVAL;

		set_dev_node(dev, dev_node);
		pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n",
			smmu->base_address,
			smmu->pxm);
	}
	return 0;
}
#else
#define arm_smmu_v3_set_proximity NULL
#endif

static int __init arm_smmu_count_resources(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	/*
	 * Only consider the global fault interrupt and ignore the
	 * configuration access interrupt.
	 *
	 * MMIO address and global fault interrupt resources are always
	 * present so add them to the context interrupt count as a static
	 * value.
	 */
	return smmu->context_interrupt_count + 2;
}

static void __init arm_smmu_init_resources(struct resource *res,
					   struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;
	int i, hw_irq, trigger, num_res = 0;
	u64 *ctx_irq, *glb_irq;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	res[num_res].start = smmu->base_address;
	res[num_res].end = smmu->base_address + smmu->span - 1;
	res[num_res].flags = IORESOURCE_MEM;
	num_res++;

	glb_irq = ACPI_ADD_PTR(u64, node, smmu->global_interrupt_offset);
	/* Global IRQs */
	hw_irq = IORT_IRQ_MASK(glb_irq[0]);
	trigger = IORT_IRQ_TRIGGER_MASK(glb_irq[0]);

	acpi_iort_register_irq(hw_irq, "arm-smmu-global", trigger,
				     &res[num_res++]);

	/* Context IRQs */
	ctx_irq = ACPI_ADD_PTR(u64, node, smmu->context_interrupt_offset);
	for (i = 0; i < smmu->context_interrupt_count; i++) {
		hw_irq = IORT_IRQ_MASK(ctx_irq[i]);
		trigger = IORT_IRQ_TRIGGER_MASK(ctx_irq[i]);

		acpi_iort_register_irq(hw_irq, "arm-smmu-context", trigger,
				       &res[num_res++]);
	}
}

static void __init arm_smmu_dma_configure(struct device *dev,
					  struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;
	enum dev_dma_attr attr;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	attr = (smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK) ?
			DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;

	/* We expect the dma masks to be equivalent for SMMU set-ups */
	dev->dma_mask = &dev->coherent_dma_mask;

	/* Configure DMA for the page table walker */
	acpi_dma_configure(dev, attr);
}

static int __init arm_smmu_v3_pmcg_count_resources(struct acpi_iort_node *node)
{
	struct acpi_iort_pmcg *pmcg;

	/* Retrieve PMCG specific data */
	pmcg = (struct acpi_iort_pmcg *)node->node_data;

	/*
	 * There are always 2 memory resources.
	 * If the overflow_gsiv is present then add that for a total of 3.
	 */
	return pmcg->overflow_gsiv ? 3 : 2;
}

static void __init arm_smmu_v3_pmcg_init_resources(struct resource *res,
						   struct acpi_iort_node *node)
{
	struct acpi_iort_pmcg *pmcg;

	/* Retrieve PMCG specific data */
	pmcg = (struct acpi_iort_pmcg *)node->node_data;

	res[0].start = pmcg->page0_base_address;
	res[0].end = pmcg->page0_base_address + SZ_4K - 1;
	res[0].flags = IORESOURCE_MEM;
	/*
	 * The initial version in DEN0049C lacked a way to describe register
	 * page 1, which makes it broken for most PMCG implementations; in
	 * that case, just let the driver fail gracefully if it expects to
	 * find a second memory resource.
	 */
	if (node->revision > 0) {
		res[1].start = pmcg->page1_base_address;
		res[1].end = pmcg->page1_base_address + SZ_4K - 1;
		res[1].flags = IORESOURCE_MEM;
	}

	if (pmcg->overflow_gsiv)
		acpi_iort_register_irq(pmcg->overflow_gsiv, "overflow",
				       ACPI_EDGE_SENSITIVE, &res[2]);
}

static struct acpi_platform_list pmcg_plat_info[] __initdata = {
	/* HiSilicon Hip08 Platform */
	{"HISI  ", "HIP08   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
	 "Erratum #162001800, Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP08},
	/* HiSilicon Hip09 Platform */
	{"HISI  ", "HIP09   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
	 "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
	{ }
};

static int __init arm_smmu_v3_pmcg_add_platdata(struct platform_device *pdev)
{
	u32 model;
	int idx;

	idx = acpi_match_platform_list(pmcg_plat_info);
	if (idx >= 0)
		model = pmcg_plat_info[idx].data;
	else
		model = IORT_SMMU_V3_PMCG_GENERIC;

	return platform_device_add_data(pdev, &model, sizeof(model));
}
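
/*
 * Example (a hedged sketch, function name illustrative): the PMCG driver
 * side can retrieve the model value passed above with:
 *
 *	static u32 pmcg_get_model_example(struct platform_device *pdev)
 *	{
 *		u32 *model = dev_get_platdata(&pdev->dev);
 *
 *		return model ? *model : IORT_SMMU_V3_PMCG_GENERIC;
 *	}
 */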

struct iort_dev_config {
	const char *name;
	int (*dev_init)(struct acpi_iort_node *node);
	void (*dev_dma_configure)(struct device *dev,
				  struct acpi_iort_node *node);
	int (*dev_count_resources)(struct acpi_iort_node *node);
	void (*dev_init_resources)(struct resource *res,
				     struct acpi_iort_node *node);
	int (*dev_set_proximity)(struct device *dev,
				    struct acpi_iort_node *node);
	int (*dev_add_platdata)(struct platform_device *pdev);
};

static const struct iort_dev_config iort_arm_smmu_v3_cfg __initconst = {
	.name = "arm-smmu-v3",
	.dev_dma_configure = arm_smmu_v3_dma_configure,
	.dev_count_resources = arm_smmu_v3_count_resources,
	.dev_init_resources = arm_smmu_v3_init_resources,
	.dev_set_proximity = arm_smmu_v3_set_proximity,
};

static const struct iort_dev_config iort_arm_smmu_cfg __initconst = {
	.name = "arm-smmu",
	.dev_dma_configure = arm_smmu_dma_configure,
	.dev_count_resources = arm_smmu_count_resources,
	.dev_init_resources = arm_smmu_init_resources,
};

static const struct iort_dev_config iort_arm_smmu_v3_pmcg_cfg __initconst = {
	.name = "arm-smmu-v3-pmcg",
	.dev_count_resources = arm_smmu_v3_pmcg_count_resources,
	.dev_init_resources = arm_smmu_v3_pmcg_init_resources,
	.dev_add_platdata = arm_smmu_v3_pmcg_add_platdata,
};

static __init const struct iort_dev_config *iort_get_dev_cfg(
			struct acpi_iort_node *node)
{
	switch (node->type) {
	case ACPI_IORT_NODE_SMMU_V3:
		return &iort_arm_smmu_v3_cfg;
	case ACPI_IORT_NODE_SMMU:
		return &iort_arm_smmu_cfg;
	case ACPI_IORT_NODE_PMCG:
		return &iort_arm_smmu_v3_pmcg_cfg;
	default:
		return NULL;
	}
}

/**
 * iort_add_platform_device() - Allocate a platform device for an IORT node
 * @node: Pointer to device ACPI IORT node
 * @ops: Pointer to IORT device config struct
 *
 * Returns: 0 on success, <0 failure
 */
static int __init iort_add_platform_device(struct acpi_iort_node *node,
					   const struct iort_dev_config *ops)
{
	struct fwnode_handle *fwnode;
	struct platform_device *pdev;
	struct resource *r;
	int ret, count;

	pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO);
	if (!pdev)
		return -ENOMEM;

	if (ops->dev_set_proximity) {
		ret = ops->dev_set_proximity(&pdev->dev, node);
		if (ret)
			goto dev_put;
	}

	count = ops->dev_count_resources(node);

	r = kcalloc(count, sizeof(*r), GFP_KERNEL);
	if (!r) {
		ret = -ENOMEM;
		goto dev_put;
	}

	ops->dev_init_resources(r, node);

	ret = platform_device_add_resources(pdev, r, count);
	/*
	 * Resources are duplicated in platform_device_add_resources();
	 * free the allocated memory.
	 */
	kfree(r);

	if (ret)
		goto dev_put;

	/*
	 * Platform devices based on PMCG nodes use platform_data to
	 * pass the hardware model info to the driver. For others, add
	 * a copy of the IORT node pointer to platform_data, to be used
	 * to retrieve IORT data information.
	 */
	if (ops->dev_add_platdata)
		ret = ops->dev_add_platdata(pdev);
	else
		ret = platform_device_add_data(pdev, &node, sizeof(node));

	if (ret)
		goto dev_put;

	fwnode = iort_get_fwnode(node);

	if (!fwnode) {
		ret = -ENODEV;
		goto dev_put;
	}

	pdev->dev.fwnode = fwnode;

	if (ops->dev_dma_configure)
		ops->dev_dma_configure(&pdev->dev, node);

	iort_set_device_domain(&pdev->dev, node);

	ret = platform_device_add(pdev);
	if (ret)
		goto dma_deconfigure;

	return 0;

dma_deconfigure:
	arch_teardown_dma_ops(&pdev->dev);
dev_put:
	platform_device_put(pdev);

	return ret;
}

#ifdef CONFIG_PCI
static void __init iort_enable_acs(struct acpi_iort_node *iort_node)
{
	static bool acs_enabled __initdata;

	if (acs_enabled)
		return;

	if (iort_node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
		struct acpi_iort_node *parent;
		struct acpi_iort_id_mapping *map;
		int i;

		map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, iort_node,
				   iort_node->mapping_offset);

		for (i = 0; i < iort_node->mapping_count; i++, map++) {
			if (!map->output_reference)
				continue;

			parent = ACPI_ADD_PTR(struct acpi_iort_node,
					iort_table, map->output_reference);
			/*
			 * If we detect a RC->SMMU mapping, make sure
			 * we enable ACS on the system.
			 */
			if ((parent->type == ACPI_IORT_NODE_SMMU) ||
				(parent->type == ACPI_IORT_NODE_SMMU_V3)) {
				pci_request_acs();
				acs_enabled = true;
				return;
			}
		}
	}
}
#else
static inline void iort_enable_acs(struct acpi_iort_node *iort_node) { }
#endif

static void __init iort_init_platform_devices(void)
{
	struct acpi_iort_node *iort_node, *iort_end;
	struct acpi_table_iort *iort;
	struct fwnode_handle *fwnode;
	int i, ret;
	const struct iort_dev_config *ops;

	/*
	 * iort_table and iort both point to the start of the IORT table,
	 * but have different struct types.
	 */
	iort = (struct acpi_table_iort *)iort_table;

	/* Get the first IORT node */
	iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				 iort->node_offset);
	iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
				iort_table->length);

	for (i = 0; i < iort->node_count; i++) {
		if (iort_node >= iort_end) {
			pr_err("iort node pointer overflows, bad table\n");
			return;
		}

		iort_enable_acs(iort_node);

		ops = iort_get_dev_cfg(iort_node);
		if (ops) {
			fwnode = acpi_alloc_fwnode_static();
			if (!fwnode)
				return;

			iort_set_fwnode(iort_node, fwnode);

			ret = iort_add_platform_device(iort_node, ops);
			if (ret) {
				iort_delete_fwnode(iort_node);
				acpi_free_fwnode_static(fwnode);
				return;
			}
		}

		iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
					 iort_node->length);
	}
}

void __init acpi_iort_init(void)
{
	acpi_status status;

	/*
	 * iort_table will be used at runtime after the iort init,
	 * so we don't need to call acpi_put_table() to release
	 * the IORT table mapping.
	 */
	status = acpi_get_table(ACPI_SIG_IORT, 0, &iort_table);
	if (ACPI_FAILURE(status)) {
		if (status != AE_NOT_FOUND) {
			const char *msg = acpi_format_exception(status);

			pr_err("Failed to get table, %s\n", msg);
		}

		return;
	}

	iort_init_platform_devices();
}
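
/*
 * Example call site (a hedged sketch; the exact init path is assumed,
 * not shown here): acpi_iort_init() is expected to run once from the
 * ACPI core's early initialization, before any of the lookup helpers
 * above are used:
 *
 *	if (!acpi_disabled)
 *		acpi_iort_init();
 *
 * If the IORT table is absent, the function returns quietly and later
 * lookups see a NULL iort_table and fail gracefully.
 */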

#ifdef CONFIG_ZONE_DMA
/*
 * Extract the highest CPU physical address accessible to all DMA masters in
 * the system. PHYS_ADDR_MAX is returned when no constrained device is found.
 */
phys_addr_t __init acpi_iort_dma_get_max_cpu_address(void)
{
	phys_addr_t limit = PHYS_ADDR_MAX;
	struct acpi_iort_node *node, *end;
	struct acpi_table_iort *iort;
	acpi_status status;
	int i;

	if (acpi_disabled)
		return limit;

	status = acpi_get_table(ACPI_SIG_IORT, 0,
				(struct acpi_table_header **)&iort);
	if (ACPI_FAILURE(status))
		return limit;

	node = ACPI_ADD_PTR(struct acpi_iort_node, iort, iort->node_offset);
	end = ACPI_ADD_PTR(struct acpi_iort_node, iort, iort->header.length);

	for (i = 0; i < iort->node_count; i++) {
		if (node >= end)
			break;

		switch (node->type) {
			struct acpi_iort_named_component *ncomp;
			struct acpi_iort_root_complex *rc;
			phys_addr_t local_limit;

		case ACPI_IORT_NODE_NAMED_COMPONENT:
			ncomp = (struct acpi_iort_named_component *)node->node_data;
			local_limit = DMA_BIT_MASK(ncomp->memory_address_limit);
			limit = min_not_zero(limit, local_limit);
			break;

		case ACPI_IORT_NODE_PCI_ROOT_COMPLEX:
			if (node->revision < 1)
				break;

			rc = (struct acpi_iort_root_complex *)node->node_data;
			local_limit = DMA_BIT_MASK(rc->memory_address_limit);
			limit = min_not_zero(limit, local_limit);
			break;
		}
		node = ACPI_ADD_PTR(struct acpi_iort_node, node, node->length);
	}
	acpi_put_table(&iort->header);
	return limit;
}
#endif