Lines matching defs: its

122 #define is_v4(its)		(!!((its)->typer & GITS_TYPER_VLPIS))
123 #define is_v4_1(its)		(!!((its)->typer & GITS_TYPER_VMAPP))
124 #define device_ids(its)		(FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1)
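
The three macros above probe the cached GITS_TYPER capability word: is_v4()/is_v4_1() test single feature bits, while device_ids() decodes a multi-bit field that the hardware stores as "width minus one". A minimal userspace sketch of the same pattern; the mask positions follow the GICv3 layout, but FIELD_GET_U64() and the test value are illustrative stand-ins for the kernel's bitfield.h helpers:

    #include <stdint.h>
    #include <stdio.h>

    #define TYPER_VLPIS	(1ULL << 1)		/* bit 1, as in GITS_TYPER */
    #define TYPER_DEVBITS	(0x1fULL << 13)		/* bits 17:13 */

    /* Poor man's FIELD_GET: divide by the mask's lowest set bit to shift down. */
    #define FIELD_GET_U64(mask, reg) (((reg) & (mask)) / ((mask) & -(mask)))

    struct its { uint64_t typer; };

    #define is_v4(its)	(!!((its)->typer & TYPER_VLPIS))
    #define device_ids(its)	(FIELD_GET_U64(TYPER_DEVBITS, (its)->typer) + 1)

    int main(void)
    {
    	/* DEVBITS holds "number of DeviceID bits minus one": 19 means 20 bits. */
    	struct its its = { .typer = TYPER_VLPIS | (19ULL << 13) };

    	printf("v4: %d, device ID bits: %llu\n",
    	       is_v4(&its), (unsigned long long)device_ids(&its));
    	return 0;
    }
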
157 * translation table, and a list of interrupts. If some of its
163 struct its_node *its;
205 static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its)
207 return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]);
217 struct its_node *its;
220 list_for_each_entry(its, &its_nodes, entry) {
221 if (!is_v4(its))
224 if (require_its_list_vmovp(vm, its))
225 __set_bit(its->list_nr, &its_list);
240 struct its_node *its = its_dev->its;
242 return its->collections + its_dev->event_map.col_map[event];
329 static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
331 if (valid_col(its->collections + vpe->col_idx))
615 static struct its_collection *its_build_mapd_cmd(struct its_node *its,
636 static struct its_collection *its_build_mapc_cmd(struct its_node *its,
650 static struct its_collection *its_build_mapti_cmd(struct its_node *its,
670 static struct its_collection *its_build_movi_cmd(struct its_node *its,
689 static struct its_collection *its_build_discard_cmd(struct its_node *its,
707 static struct its_collection *its_build_inv_cmd(struct its_node *its,
725 static struct its_collection *its_build_int_cmd(struct its_node *its,
743 static struct its_collection *its_build_clear_cmd(struct its_node *its,
761 static struct its_collection *its_build_invall_cmd(struct its_node *its,
773 static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
782 return valid_vpe(its, desc->its_vinvall_cmd.vpe);
785 static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
798 if (is_v4_1(its)) {
807 target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;
813 if (!is_v4_1(its))
835 return valid_vpe(its, desc->its_vmapp_cmd.vpe);
838 static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
844 if (!is_v4_1(its) && desc->its_vmapti_cmd.db_enabled)
858 return valid_vpe(its, desc->its_vmapti_cmd.vpe);
861 static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
867 if (!is_v4_1(its) && desc->its_vmovi_cmd.db_enabled)
881 return valid_vpe(its, desc->its_vmovi_cmd.vpe);
884 static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
890 target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
897 if (is_v4_1(its)) {
904 return valid_vpe(its, desc->its_vmovp_cmd.vpe);
907 static struct its_vpe *its_build_vinv_cmd(struct its_node *its,
922 return valid_vpe(its, map->vpe);
925 static struct its_vpe *its_build_vint_cmd(struct its_node *its,
940 return valid_vpe(its, map->vpe);
943 static struct its_vpe *its_build_vclear_cmd(struct its_node *its,
958 return valid_vpe(its, map->vpe);
961 static struct its_vpe *its_build_invdb_cmd(struct its_node *its,
965 if (WARN_ON(!is_v4_1(its)))
973 return valid_vpe(its, desc->its_invdb_cmd.vpe);
976 static struct its_vpe *its_build_vsgi_cmd(struct its_node *its,
980 if (WARN_ON(!is_v4_1(its)))
993 return valid_vpe(its, desc->its_vsgi_cmd.vpe);
996 static u64 its_cmd_ptr_to_offset(struct its_node *its,
999 return (ptr - its->cmd_base) * sizeof(*ptr);
1002 static int its_queue_full(struct its_node *its)
1007 widx = its->cmd_write - its->cmd_base;
1008 ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);
1017 static struct its_cmd_block *its_allocate_entry(struct its_node *its)
1022 while (its_queue_full(its)) {
1032 cmd = its->cmd_write++;
1035 if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
1036 its->cmd_write = its->cmd_base;
1047 static struct its_cmd_block *its_post_commands(struct its_node *its)
1049 u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);
1051 writel_relaxed(wr, its->base + GITS_CWRITER);
1053 return its->cmd_write;
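
its_cmd_ptr_to_offset(), its_queue_full(), its_allocate_entry() and its_post_commands() together implement a single-producer command ring: software advances cmd_write, wraps at ITS_CMD_QUEUE_NR_ENTRIES, and publishes the new byte offset via GITS_CWRITER, while the hardware's GITS_CREADR acts as the consumer index. A self-contained sketch of that ring logic, with a plain variable standing in for the MMIO read pointer:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NR_ENTRIES 8			/* illustrative; real queue size differs */

    struct cmd_block { uint64_t raw[4]; };	/* each ITS command is 32 bytes */

    static struct cmd_block queue[NR_ENTRIES];
    static struct cmd_block *cmd_write = queue;
    static size_t hw_read_idx;			/* stand-in for GITS_CREADR / 32 */

    /* Byte offset, as written to GITS_CWRITER: element delta * element size. */
    static size_t ptr_to_offset(struct cmd_block *ptr)
    {
    	return (ptr - queue) * sizeof(*ptr);
    }

    static int queue_full(void)
    {
    	size_t widx = cmd_write - queue;

    	/* Full when advancing the write index would catch the read index. */
    	return (widx + 1) % NR_ENTRIES == hw_read_idx;
    }

    static struct cmd_block *allocate_entry(void)
    {
    	struct cmd_block *cmd;

    	if (queue_full())
    		return NULL;		/* the driver spins and retries instead */

    	cmd = cmd_write++;
    	if (cmd_write == queue + NR_ENTRIES)	/* wrap at the queue end */
    		cmd_write = queue;
    	return cmd;
    }

    int main(void)
    {
    	struct cmd_block *c = allocate_entry();

    	printf("slot at byte offset %zu\n", ptr_to_offset(c));
    	return 0;
    }
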
1056 static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
1062 if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
1068 static int its_wait_for_range_completion(struct its_node *its,
1076 to_idx = its_cmd_ptr_to_offset(its, to);
1085 rd_idx = readl_relaxed(its->base + GITS_CREADR);
1115 void name(struct its_node *its, \
1124 raw_spin_lock_irqsave(&its->lock, flags); \
1126 cmd = its_allocate_entry(its); \
1128 raw_spin_unlock_irqrestore(&its->lock, flags); \
1131 sync_obj = builder(its, cmd, desc); \
1132 its_flush_cmd(its, cmd); \
1135 sync_cmd = its_allocate_entry(its); \
1139 buildfn(its, sync_cmd, sync_obj); \
1140 its_flush_cmd(its, sync_cmd); \
1144 rd_idx = readl_relaxed(its->base + GITS_CREADR); \
1145 next_cmd = its_post_commands(its); \
1146 raw_spin_unlock_irqrestore(&its->lock, flags); \
1148 if (its_wait_for_range_completion(its, rd_idx, next_cmd)) \
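
The fragment above is the body of the driver's BUILD_SINGLE_CMD_FUNC macro, which stamps out its_send_single_command() and its_send_single_vcommand(). The shape is fixed: take the command-queue lock, allocate a slot, build and flush the command, optionally append a SYNC for the object the builder returned, publish via GITS_CWRITER, drop the lock, then poll for completion outside the lock. A condensed sketch of that flow, with pthread locking and stubbed hardware accessors standing in for the kernel primitives:

    #include <pthread.h>
    #include <stdio.h>

    struct cmd_block { unsigned long raw[4]; };
    struct cmd_desc { int payload; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct cmd_block slots[8];
    static int wr;

    static struct cmd_block *allocate_entry(void) { return &slots[wr++ % 8]; }
    static void flush_cmd(struct cmd_block *cmd) { (void)cmd; /* dcache clean */ }
    static void post_commands(void) { /* write new offset to GITS_CWRITER */ }
    static int wait_for_completion(void) { return 0; /* poll GITS_CREADR */ }

    typedef void *(*builder_fn)(struct cmd_block *, struct cmd_desc *);

    static void *build_noop(struct cmd_block *cmd, struct cmd_desc *desc)
    {
    	cmd->raw[0] = desc->payload;	/* encode opcode + arguments */
    	return NULL;			/* non-NULL would request a SYNC */
    }

    /* The generated function: serialize, build, publish, then wait unlocked. */
    static void send_single_command(builder_fn builder, struct cmd_desc *desc)
    {
    	struct cmd_block *cmd;
    	void *sync_obj;

    	pthread_mutex_lock(&lock);
    	cmd = allocate_entry();
    	sync_obj = builder(cmd, desc);
    	flush_cmd(cmd);
    	if (sync_obj) {
    		/* build + flush a trailing SYNC/VSYNC command here */
    	}
    	post_commands();
    	pthread_mutex_unlock(&lock);

    	if (wait_for_completion())
    		fprintf(stderr, "command timed out\n");
    }

    int main(void)
    {
    	struct cmd_desc d = { .payload = 42 };

    	send_single_command(build_noop, &d);
    	printf("queued: %lu\n", slots[0].raw[0]);
    	return 0;
    }
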
1152 static void its_build_sync_cmd(struct its_node *its,
1165 static void its_build_vsync_cmd(struct its_node *its,
1185 its_send_single_command(dev->its, its_build_int_cmd, &desc);
1195 its_send_single_command(dev->its, its_build_clear_cmd, &desc);
1205 its_send_single_command(dev->its, its_build_inv_cmd, &desc);
1215 its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
1218 static void its_send_mapc(struct its_node *its, struct its_collection *col,
1226 its_send_single_command(its, its_build_mapc_cmd, &desc);
1237 its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
1249 its_send_single_command(dev->its, its_build_movi_cmd, &desc);
1259 its_send_single_command(dev->its, its_build_discard_cmd, &desc);
1262 static void its_send_invall(struct its_node *its, struct its_collection *col)
1268 its_send_single_command(its, its_build_invall_cmd, &desc);
1282 its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
1295 its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
1298 static void its_send_vmapp(struct its_node *its,
1305 desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
1307 its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
1313 struct its_node *its;
1320 its = list_first_entry(&its_nodes, struct its_node, entry);
1321 desc.its_vmovp_cmd.col = &its->collections[col_id];
1322 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
1340 list_for_each_entry(its, &its_nodes, entry) {
1341 if (!is_v4(its))
1344 if (!require_its_list_vmovp(vpe->its_vm, its))
1347 desc.its_vmovp_cmd.col = &its->collections[col_id];
1348 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
1354 static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
1359 its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
1373 its_send_single_vcommand(dev->its, its_build_vinv_cmd, &desc);
1387 its_send_single_vcommand(dev->its, its_build_vint_cmd, &desc);
1401 its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc);
1404 static void its_send_invdb(struct its_node *its, struct its_vpe *vpe)
1409 its_send_single_vcommand(its, its_build_invdb_cmd, &desc);
1481 WARN_ON(!is_v4_1(its_dev->its));
1499 (is_v4_1(its_dev->its) || !irqd_is_forwarded_to_vcpu(d)))
1517 if (is_v4_1(its_dev->its))
1530 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
1610 node = its_dev->its->numa_node;
1642 if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144))
1660 if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) &&
1698 target_col = &its_dev->its->collections[cpu];
1715 struct its_node *its = its_dev->its;
1717 return its->phys_base + GITS_TRANSLATER;
1723 struct its_node *its;
1726 its = its_dev->its;
1727 addr = its->get_msi_base(its_dev);
1786 static void its_map_vm(struct its_node *its, struct its_vm *vm)
1799 vm->vlpi_count[its->list_nr]++;
1801 if (vm->vlpi_count[its->list_nr] == 1) {
1810 its_send_vmapp(its, vpe, true);
1811 its_send_vinvall(its, vpe);
1819 static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
1829 if (!--vm->vlpi_count[its->list_nr]) {
1833 its_send_vmapp(its, vm->vpes[i], false);
1875 its_map_vm(its_dev->its, info->map->vm);
1948 its_unmap_vm(its_dev->its, its_dev->event_map.vm);
1986 if (!is_v4(its_dev->its))
2306 static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
2308 u32 idx = baser - its->tables;
2310 return gits_read_baser(its->base + GITS_BASER + (idx << 3));
2313 static void its_write_baser(struct its_node *its, struct its_baser *baser,
2316 u32 idx = baser - its->tables;
2318 gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
2319 baser->val = its_read_baser(its, baser);
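
The GITS_BASERn registers occupy consecutive 64-bit slots, so both accessors derive the register index from pointer arithmetic on its->tables and scale it by 8 (idx << 3). A tiny sketch of that pointer-difference-to-stride computation; the 0x0100 base offset matches the GICv3 register map, the rest is illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define GITS_BASER 0x0100		/* offset of GITS_BASER0 in the ITS frame */

    struct baser { uint64_t val; };

    int main(void)
    {
    	struct baser tables[8] = { 0 };
    	struct baser *b = &tables[3];
    	uint32_t idx = b - tables;	/* element delta, not bytes */

    	printf("GITS_BASER%u at offset 0x%x\n", idx, GITS_BASER + (idx << 3));
    	return 0;
    }
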
2322 static int its_setup_baser(struct its_node *its, struct its_baser *baser,
2325 u64 val = its_read_baser(its, baser);
2337 &its->phys_base, its_base_type_string[type],
2343 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
2390 its_write_baser(its, baser, val);
2410 &its->phys_base, its_base_type_string[type],
2422 &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
2431 static bool its_parse_indirect_baser(struct its_node *its,
2435 u64 tmp = its_read_baser(its, baser);
2449 its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
2477 &its->phys_base, its_base_type_string[type],
2478 device_ids(its), ids);
2496 static u32 compute_its_aff(struct its_node *its)
2506 svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer);
2508 val |= FIELD_PREP(GICR_TYPER_AFFINITY, its->mpidr);
2514 struct its_node *its;
2522 list_for_each_entry(its, &its_nodes, entry) {
2525 if (!is_v4_1(its) || its == cur_its)
2528 if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
2531 if (aff != compute_its_aff(its))
2535 baser = its->tables[2].val;
2539 return its;
2545 static void its_free_tables(struct its_node *its)
2550 if (its->tables[i].base) {
2551 free_pages((unsigned long)its->tables[i].base,
2552 its->tables[i].order);
2553 its->tables[i].base = NULL;
2558 static int its_probe_baser_psz(struct its_node *its, struct its_baser *baser)
2565 val = its_read_baser(its, baser);
2584 its_write_baser(its, baser, val);
2606 static int its_alloc_tables(struct its_node *its)
2612 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
2616 if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE) {
2622 struct its_baser *baser = its->tables + i;
2623 u64 val = its_read_baser(its, baser);
2631 if (its_probe_baser_psz(its, baser)) {
2632 its_free_tables(its);
2640 indirect = its_parse_indirect_baser(its, baser, &order,
2641 device_ids(its));
2645 if (is_v4_1(its)) {
2649 if ((sibling = find_sibling_its(its))) {
2651 its_write_baser(its, baser, baser->val);
2656 indirect = its_parse_indirect_baser(its, baser, &order,
2661 err = its_setup_baser(its, baser, cache, shr, order, indirect);
2663 its_free_tables(its);
2677 struct its_node *its;
2684 list_for_each_entry(its, &its_nodes, entry) {
2687 if (!is_v4_1(its))
2690 if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
2693 if (aff != compute_its_aff(its))
2697 baser = its->tables[2].val;
2702 gic_data_rdist()->vpe_l1_base = its->tables[2].base;
2754 * ours wrt CommonLPIAff. Let's use its own VPROPBASER.
2964 static int its_alloc_collections(struct its_node *its)
2968 its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
2970 if (!its->collections)
2974 its->collections[i].target_address = ~0ULL;
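
its_alloc_collections() sizes the array by nr_cpu_ids and seeds every target_address with all ones, so a collection is recognizable as unbound until its_cpu_init_collection() fills in a real redistributor target. A short userspace sketch of that sentinel pattern:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct collection { uint64_t target_address; uint16_t col_id; };

    int main(void)
    {
    	int nr_cpus = 4;	/* stand-in for nr_cpu_ids */
    	struct collection *cols = calloc(nr_cpus, sizeof(*cols));

    	if (!cols)
    		return 1;
    	for (int i = 0; i < nr_cpus; i++)
    		cols[i].target_address = ~0ULL;		/* "not mapped yet" */

    	cols[0].target_address = 0x80000000ULL;		/* CPU 0 comes online */

    	for (int i = 0; i < nr_cpus; i++)
    		printf("col %d %s\n", i,
    		       cols[i].target_address == ~0ULL ? "unmapped" : "mapped");
    	free(cols);
    	return 0;
    }
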
3228 static void its_cpu_init_collection(struct its_node *its)
3233 /* avoid cross-node collections and their mapping */
3234 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
3238 if (its->numa_node != NUMA_NO_NODE &&
3239 its->numa_node != of_node_to_nid(cpu_node))
3244 * We now have to bind each collection to its target
3247 if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
3260 its->collections[cpu].target_address = target;
3261 its->collections[cpu].col_id = cpu;
3263 its_send_mapc(its, &its->collections[cpu], 1);
3264 its_send_invall(its, &its->collections[cpu]);
3269 struct its_node *its;
3273 list_for_each_entry(its, &its_nodes, entry)
3274 its_cpu_init_collection(its);
3279 static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
3284 raw_spin_lock_irqsave(&its->lock, flags);
3286 list_for_each_entry(tmp, &its->its_device_list, entry) {
3293 raw_spin_unlock_irqrestore(&its->lock, flags);
3298 static struct its_baser *its_get_baser(struct its_node *its, u32 type)
3303 if (GITS_BASER_TYPE(its->tables[i].val) == type)
3304 return &its->tables[i];
3310 static bool its_alloc_table_entry(struct its_node *its,
3331 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
3353 static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
3357 baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
3361 return (ilog2(dev_id) < device_ids(its));
3363 return its_alloc_table_entry(its, baser, dev_id);
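
When its_get_baser() finds no device-table BASER, its_alloc_device_table() falls back to a pure range check: ilog2(dev_id) < device_ids(its) asks whether the ID's highest set bit fits within the supported ID width. A sketch of the check, using a compiler builtin in place of the kernel's ilog2():

    #include <stdio.h>

    static int ilog2_u32(unsigned int v)	/* floor(log2(v)); caller ensures v > 0 */
    {
    	return 31 - __builtin_clz(v);
    }

    static int id_fits(unsigned int dev_id, int id_bits)
    {
    	/* An ID fits in N bits iff its highest set bit is below position N. */
    	return ilog2_u32(dev_id) < id_bits;
    }

    int main(void)
    {
    	printf("%d %d\n", id_fits(0xfffff, 20),	/* 20-bit ID: fits */
    			  id_fits(0x100000, 20)); /* needs 21 bits: rejected */
    	return 0;
    }
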
3368 struct its_node *its;
3378 list_for_each_entry(its, &its_nodes, entry) {
3381 if (!is_v4(its))
3384 baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
3388 if (!its_alloc_table_entry(its, baser, vpe_id))
3408 static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
3421 if (!its_alloc_device_table(its, dev_id))
3433 sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1);
3435 itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
3457 dev->its = its;
3468 raw_spin_lock_irqsave(&its->lock, flags);
3469 list_add(&dev->entry, &its->its_device_list);
3470 raw_spin_unlock_irqrestore(&its->lock, flags);
3472 /* Map device to its ITT */
3482 raw_spin_lock_irqsave(&its_dev->its->lock, flags);
3484 raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
3509 struct its_node *its;
3524 its = msi_info->data;
3528 vpe_proxy.dev->its == its &&
3536 mutex_lock(&its->dev_alloc_lock);
3537 its_dev = its_find_device(its, dev_id);
3549 its_dev = its_create_device(its, dev_id, nvec, true);
3560 mutex_unlock(&its->dev_alloc_lock);
3598 struct its_node *its = its_dev->its;
3608 err = iommu_dma_prepare_msi(info->desc, its->get_msi_base(its_dev));
3667 struct its_node *its = its_dev->its;
3681 mutex_lock(&its->dev_alloc_lock);
3699 mutex_unlock(&its->dev_alloc_lock);
3817 target_col = &vpe_proxy.dev->its->collections[to];
3837 * interrupt to its new location.
3914 * would be able to read its coarse map pretty quickly anyway,
3936 struct its_node *its;
3938 list_for_each_entry(its, &its_nodes, entry) {
3939 if (!is_v4(its))
3942 if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
3949 its_send_vinvall(its, vpe);
4070 static struct its_node *its = NULL;
4072 if (!its) {
4073 list_for_each_entry(its, &its_nodes, entry) {
4074 if (is_v4_1(its))
4075 return its;
4079 its = NULL;
4082 return its;
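
find_4_1_its() caches the first GICv4.1-capable node in a function-local static; the explicit reset to NULL after the loop matters because list_for_each_entry() leaves its cursor pointing at the list head's container, not at a valid node, when nothing matched. A sketch of the memoized-search pattern, with a plain array standing in for the kernel list:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct node { bool v4_1; };

    static struct node nodes[] = { { false }, { true }, { false } };

    static struct node *find_4_1(void)
    {
    	static struct node *cached;

    	if (!cached) {
    		for (size_t i = 0; i < sizeof(nodes) / sizeof(nodes[0]); i++) {
    			if (nodes[i].v4_1) {
    				cached = &nodes[i];	/* remember the hit */
    				break;
    			}
    		}
    		/* cached stays NULL if nothing matched, so we retry later */
    	}
    	return cached;
    }

    int main(void)
    {
    	printf("first v4.1 node: %p\n", (void *)find_4_1());
    	printf("cached result:   %p\n", (void *)find_4_1());
    	return 0;
    }
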
4088 struct its_node *its;
4093 * it to the first valid ITS, and let the HW do its magic.
4095 its = find_4_1_its();
4096 if (its)
4097 its_send_invdb(its, vpe);
4279 struct its_node *its = find_4_1_its();
4284 writeq_relaxed(val, its->sgir_base + GITS_SGIR - SZ_128K);
4579 struct its_node *its;
4592 list_for_each_entry(its, &its_nodes, entry) {
4593 if (!is_v4(its))
4596 its_send_vmapp(its, vpe, true);
4597 its_send_vinvall(its, vpe);
4609 struct its_node *its;
4618 list_for_each_entry(its, &its_nodes, entry) {
4619 if (!is_v4(its))
4622 its_send_vmapp(its, vpe, false);
4677 struct its_node *its = data;
4680 its->typer &= ~GITS_TYPER_DEVBITS;
4681 its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, 20 - 1);
4682 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
4689 struct its_node *its = data;
4691 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
4698 struct its_node *its = data;
4701 its->typer &= ~GITS_TYPER_ITT_ENTRY_SIZE;
4702 its->typer |= FIELD_PREP(GITS_TYPER_ITT_ENTRY_SIZE, 16 - 1);
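
Both Cavium workarounds rewrite a field of the cached typer in place: clear the field's mask, then insert the replacement with FIELD_PREP, encoded as "N minus one" (20 DeviceID bits stored as 19, a 16-byte ITT entry stored as 15). A sketch of that clear-then-insert update with hardcoded masks; the field position follows the GICv3 layout, the values are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define DEVBITS_SHIFT 13
    #define DEVBITS_MASK  (0x1fULL << DEVBITS_SHIFT)	/* bits 17:13 */

    int main(void)
    {
    	uint64_t typer = 0x18ULL << DEVBITS_SHIFT;	/* HW claims 25 ID bits */

    	typer &= ~DEVBITS_MASK;				/* drop the old field */
    	typer |= (20ULL - 1) << DEVBITS_SHIFT;		/* cap at 20 bits (N-1) */

    	printf("device ID bits now: %llu\n",
    	       (unsigned long long)(((typer & DEVBITS_MASK) >> DEVBITS_SHIFT) + 1));
    	return 0;
    }
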
4709 struct its_node *its = its_dev->its;
4718 return its->pre_its_base + (its_dev->device_id << 2);
4723 struct its_node *its = data;
4727 if (!fwnode_property_read_u32_array(its->fwnode_handle,
4728 "socionext,synquacer-pre-its",
4732 its->pre_its_base = pre_its_window[0];
4733 its->get_msi_base = its_irq_get_msi_base_pre_its;
4736 if (device_ids(its) > ids) {
4737 its->typer &= ~GITS_TYPER_DEVBITS;
4738 its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, ids - 1);
4742 its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_ISOLATED_MSI;
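
With the Socionext pre-ITS in the MSI path, the doorbell is no longer the shared GITS_TRANSLATER register: each device writes its own 4-byte slot at pre_its_base + (device_id << 2), which is also why the quirk clears IRQ_DOMAIN_FLAG_ISOLATED_MSI. A sketch contrasting the two address computations; the base addresses are made up for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define GITS_TRANSLATER 0x10040	/* fixed offset within the ITS frame */

    int main(void)
    {
    	uint64_t phys_base = 0x30000000;	/* hypothetical ITS frame */
    	uint64_t pre_its_base = 0x58000000;	/* hypothetical pre-ITS window */
    	uint32_t device_id = 7;

    	/* Normal ITS: every device writes the same doorbell register. */
    	printf("shared doorbell:  0x%llx\n",
    	       (unsigned long long)(phys_base + GITS_TRANSLATER));

    	/* Pre-ITS: a per-device doorbell, 4 bytes apart, keyed by DeviceID. */
    	printf("per-dev doorbell: 0x%llx\n",
    	       (unsigned long long)(pre_its_base + (device_id << 2)));
    	return 0;
    }
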
4750 struct its_node *its = data;
4756 its->vlpi_redist_offset = SZ_128K;
4762 struct its_node *its = data;
4768 its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE;
4776 struct its_node *its = data;
4778 its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE;
4845 static void its_enable_quirks(struct its_node *its)
4847 u32 iidr = readl_relaxed(its->base + GITS_IIDR);
4849 gic_enable_quirks(iidr, its_quirks, its);
4851 if (is_of_node(its->fwnode_handle))
4852 gic_enable_of_quirks(to_of_node(its->fwnode_handle),
4853 its_quirks, its);
4858 struct its_node *its;
4862 list_for_each_entry(its, &its_nodes, entry) {
4865 base = its->base;
4866 its->ctlr_save = readl_relaxed(base + GITS_CTLR);
4870 &its->phys_base, err);
4871 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
4875 its->cbaser_save = gits_read_cbaser(base + GITS_CBASER);
4880 list_for_each_entry_continue_reverse(its, &its_nodes, entry) {
4883 base = its->base;
4884 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
4894 struct its_node *its;
4898 list_for_each_entry(its, &its_nodes, entry) {
4902 base = its->base;
4916 &its->phys_base, ret);
4920 gits_write_cbaser(its->cbaser_save, base + GITS_CBASER);
4926 its->cmd_write = its->cmd_base;
4931 struct its_baser *baser = &its->tables[i];
4936 its_write_baser(its, baser, baser->val);
4938 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
4945 if (its->collections[smp_processor_id()].col_id <
4947 its_cpu_init_collection(its);
4989 static int its_init_domain(struct its_node *its)
4999 info->data = its;
5002 its->msi_domain_flags, 0,
5003 its->fwnode_handle, &its_domain_ops,
5017 struct its_node *its;
5027 its = list_first_entry(&its_nodes, struct its_node, entry);
5036 devid = GENMASK(device_ids(its) - 1, 0);
5037 vpe_proxy.dev = its_create_device(its, devid, entries, false);
5054 static int __init its_compute_its_list_map(struct its_node *its)
5068 &its->phys_base);
5072 ctlr = readl_relaxed(its->base + GITS_CTLR);
5075 writel_relaxed(ctlr, its->base + GITS_CTLR);
5076 ctlr = readl_relaxed(its->base + GITS_CTLR);
5084 &its->phys_base, its_number);
5091 static int __init its_probe_one(struct its_node *its)
5098 its_enable_quirks(its);
5100 if (is_v4(its)) {
5101 if (!(its->typer & GITS_TYPER_VMOVP)) {
5102 err = its_compute_its_list_map(its);
5106 its->list_nr = err;
5109 &its->phys_base, err);
5111 pr_info("ITS@%pa: Single VMOVP capable\n", &its->phys_base);
5114 if (is_v4_1(its)) {
5115 u32 svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer);
5117 its->sgir_base = ioremap(its->phys_base + SZ_128K, SZ_64K);
5118 if (!its->sgir_base) {
5123 its->mpidr = readl_relaxed(its->base + GITS_MPIDR);
5126 &its->phys_base, its->mpidr, svpet);
5130 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
5136 its->cmd_base = (void *)page_address(page);
5137 its->cmd_write = its->cmd_base;
5139 err = its_alloc_tables(its);
5143 err = its_alloc_collections(its);
5147 baser = (virt_to_phys(its->cmd_base) |
5153 gits_write_cbaser(baser, its->base + GITS_CBASER);
5154 tmp = gits_read_cbaser(its->base + GITS_CBASER);
5156 if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE)
5169 gits_write_cbaser(baser, its->base + GITS_CBASER);
5172 its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
5175 gits_write_cwriter(0, its->base + GITS_CWRITER);
5176 ctlr = readl_relaxed(its->base + GITS_CTLR);
5178 if (is_v4(its))
5180 writel_relaxed(ctlr, its->base + GITS_CTLR);
5182 err = its_init_domain(its);
5187 list_add(&its->entry, &its_nodes);
5193 its_free_tables(its);
5195 free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
5197 if (its->sgir_base)
5198 iounmap(its->sgir_base);
5200 pr_err("ITS@%pa: failed probing (%d)\n", &its->phys_base, err);
5357 { .compatible = "arm,gic-v3-its", },
5365 struct its_node *its;
5374 its = kzalloc(sizeof(*its), GFP_KERNEL);
5375 if (!its)
5378 raw_spin_lock_init(&its->lock);
5379 mutex_init(&its->dev_alloc_lock);
5380 INIT_LIST_HEAD(&its->entry);
5381 INIT_LIST_HEAD(&its->its_device_list);
5383 its->typer = gic_read_typer(its_base + GITS_TYPER);
5384 its->base = its_base;
5385 its->phys_base = res->start;
5386 its->get_msi_base = its_irq_get_msi_base;
5387 its->msi_domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI;
5389 its->numa_node = numa_node;
5390 its->fwnode_handle = handle;
5392 return its;
5399 static void its_node_destroy(struct its_node *its)
5401 iounmap(its->base);
5402 kfree(its);
5431 struct its_node *its;
5447 its = its_node_init(&res, &np->fwnode, of_node_to_nid(np));
5448 if (!its)
5451 err = its_probe_one(its);
5453 its_node_destroy(its);
5567 struct its_node *its;
5592 its = its_node_init(&res, dom_handle,
5594 if (!its) {
5599 err = its_probe_one(its);
5672 struct its_node *its;
5695 list_for_each_entry(its, &its_nodes, entry) {
5696 has_v4 |= is_v4(its);
5697 has_v4_1 |= is_v4_1(its);