Lines Matching defs:its
120 #define is_v4(its) (!!((its)->typer & GITS_TYPER_VLPIS))
121 #define is_v4_1(its) (!!((its)->typer & GITS_TYPER_VMAPP))
122 #define device_ids(its) (FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1)
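These three predicates at 120-122 gate everything below: is_v4() and is_v4_1() test GITS_TYPER feature bits, and device_ids() decodes the Devbits field, which stores the number of implemented DeviceID bits minus one. A minimal, self-contained sketch of the same decoding; the bit positions (VLPIS at bit 1, VMAPP at bit 37, Devbits at [17:13]) are my reading of the GICv3/v4 spec and should be treated as illustrative:

#include <stdint.h>
#include <stdio.h>

/* Assumed GITS_TYPER layout, mirroring the kernel's GITS_TYPER_* masks. */
#define TYPER_VLPIS          (UINT64_C(1) << 1)
#define TYPER_VMAPP          (UINT64_C(1) << 37)
#define TYPER_DEVBITS_SHIFT  13
#define TYPER_DEVBITS_MASK   (UINT64_C(0x1f) << TYPER_DEVBITS_SHIFT)

/* Devbits stores (number of DeviceID bits - 1), hence the +1,
 * exactly as device_ids() does above. */
static unsigned int device_id_bits(uint64_t typer)
{
	return ((typer & TYPER_DEVBITS_MASK) >> TYPER_DEVBITS_SHIFT) + 1;
}

int main(void)
{
	/* A v4 (not v4.1) ITS advertising 20 DeviceID bits. */
	uint64_t typer = TYPER_VLPIS | ((uint64_t)(20 - 1) << TYPER_DEVBITS_SHIFT);

	printf("v4=%d v4.1=%d DeviceID bits=%u\n",
	       !!(typer & TYPER_VLPIS), !!(typer & TYPER_VMAPP),
	       device_id_bits(typer));
	return 0;
}

Compiled standalone, this prints "v4=1 v4.1=0 DeviceID bits=20".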
155 * translation table, and a list of interrupts. If some of its
161 struct its_node *its;
203 static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its)
205 return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]);
210 struct its_node *its;
213 list_for_each_entry(its, &its_nodes, entry) {
214 if (!is_v4(its))
217 if (require_its_list_vmovp(vm, its))
218 __set_bit(its->list_nr, &its_list);
233 struct its_node *its = its_dev->its;
235 return its->collections + its_dev->event_map.col_map[event];
322 static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
324 if (valid_col(its->collections + vpe->col_idx))
608 static struct its_collection *its_build_mapd_cmd(struct its_node *its,
629 static struct its_collection *its_build_mapc_cmd(struct its_node *its,
643 static struct its_collection *its_build_mapti_cmd(struct its_node *its,
663 static struct its_collection *its_build_movi_cmd(struct its_node *its,
682 static struct its_collection *its_build_discard_cmd(struct its_node *its,
700 static struct its_collection *its_build_inv_cmd(struct its_node *its,
718 static struct its_collection *its_build_int_cmd(struct its_node *its,
736 static struct its_collection *its_build_clear_cmd(struct its_node *its,
754 static struct its_collection *its_build_invall_cmd(struct its_node *its,
766 static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
775 return valid_vpe(its, desc->its_vinvall_cmd.vpe);
778 static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
791 if (is_v4_1(its)) {
800 target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;
806 if (!is_v4_1(its))
823 return valid_vpe(its, desc->its_vmapp_cmd.vpe);
826 static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
832 if (!is_v4_1(its) && desc->its_vmapti_cmd.db_enabled)
846 return valid_vpe(its, desc->its_vmapti_cmd.vpe);
849 static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
855 if (!is_v4_1(its) && desc->its_vmovi_cmd.db_enabled)
869 return valid_vpe(its, desc->its_vmovi_cmd.vpe);
872 static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
878 target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
885 if (is_v4_1(its)) {
892 return valid_vpe(its, desc->its_vmovp_cmd.vpe);
895 static struct its_vpe *its_build_vinv_cmd(struct its_node *its,
910 return valid_vpe(its, map->vpe);
913 static struct its_vpe *its_build_vint_cmd(struct its_node *its,
928 return valid_vpe(its, map->vpe);
931 static struct its_vpe *its_build_vclear_cmd(struct its_node *its,
946 return valid_vpe(its, map->vpe);
949 static struct its_vpe *its_build_invdb_cmd(struct its_node *its,
953 if (WARN_ON(!is_v4_1(its)))
961 return valid_vpe(its, desc->its_invdb_cmd.vpe);
964 static struct its_vpe *its_build_vsgi_cmd(struct its_node *its,
968 if (WARN_ON(!is_v4_1(its)))
981 return valid_vpe(its, desc->its_vsgi_cmd.vpe);
984 static u64 its_cmd_ptr_to_offset(struct its_node *its,
987 return (ptr - its->cmd_base) * sizeof(*ptr);
990 static int its_queue_full(struct its_node *its)
995 widx = its->cmd_write - its->cmd_base;
996 ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);
1005 static struct its_cmd_block *its_allocate_entry(struct its_node *its)
1010 while (its_queue_full(its)) {
1020 cmd = its->cmd_write++;
1023 if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
1024 its->cmd_write = its->cmd_base;
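Lines 1005-1024 implement a classic single-producer ring: the driver keeps a software write pointer, the hardware exports its read pointer through GITS_CREADR, and the queue is declared full one slot early so write can never catch read (996, 1023-1024 above). A self-contained sketch of that discipline, with illustrative names and a deliberately tiny queue:

#include <stdio.h>
#include <stddef.h>

#define NR_ENTRIES 4   /* the driver derives this from ITS_CMD_QUEUE_SZ */

struct cmd_block { unsigned long long raw[4]; };  /* one 32-byte ITS command */

struct cmd_queue {
	struct cmd_block buf[NR_ENTRIES];
	struct cmd_block *write;  /* software copy of the write pointer */
	size_t rd_off;            /* stand-in for reading GITS_CREADR */
};

/* Full when bumping the write index would land on the read index. */
static int queue_full(struct cmd_queue *q)
{
	size_t widx = q->write - q->buf;
	size_t ridx = q->rd_off / sizeof(struct cmd_block);

	return ((widx + 1) % NR_ENTRIES) == ridx;
}

static struct cmd_block *allocate_entry(struct cmd_queue *q)
{
	struct cmd_block *cmd;

	if (queue_full(q))
		return NULL;  /* the driver instead spins with a timeout */

	cmd = q->write++;
	if (q->write == q->buf + NR_ENTRIES)
		q->write = q->buf;  /* wrap, as its_allocate_entry() does */
	return cmd;
}

int main(void)
{
	struct cmd_queue q = { .write = q.buf, .rd_off = 0 };

	/* With ridx stuck at 0, only NR_ENTRIES - 1 slots are usable. */
	for (int i = 0; i < NR_ENTRIES; i++)
		printf("slot %d: %s\n", i, allocate_entry(&q) ? "ok" : "full");
	return 0;
}

The one-slot slack is why the fourth allocation reports "full" even though the buffer has four slots; it keeps a completely full queue distinguishable from an empty one.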
1035 static struct its_cmd_block *its_post_commands(struct its_node *its)
1037 u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);
1039 writel_relaxed(wr, its->base + GITS_CWRITER);
1041 return its->cmd_write;
1044 static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
1050 if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
1056 static int its_wait_for_range_completion(struct its_node *its,
1064 to_idx = its_cmd_ptr_to_offset(its, to);
1073 rd_idx = readl_relaxed(its->base + GITS_CREADR);
1103 void name(struct its_node *its, \
1112 raw_spin_lock_irqsave(&its->lock, flags); \
1114 cmd = its_allocate_entry(its); \
1116 raw_spin_unlock_irqrestore(&its->lock, flags); \
1119 sync_obj = builder(its, cmd, desc); \
1120 its_flush_cmd(its, cmd); \
1123 sync_cmd = its_allocate_entry(its); \
1127 buildfn(its, sync_cmd, sync_obj); \
1128 its_flush_cmd(its, sync_cmd); \
1132 rd_idx = readl_relaxed(its->base + GITS_CREADR); \
1133 next_cmd = its_post_commands(its); \
1134 raw_spin_unlock_irqrestore(&its->lock, flags); \
1136 if (its_wait_for_range_completion(its, rd_idx, next_cmd)) \
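Lines 1103-1136 are the body of a BUILD macro that stamps out its_send_single_command() and its_send_single_vcommand(); unrolled, the hot path looks roughly like the sketch below (kernel-context pseudocode: the failed-allocation bail-out at 1116 and the optional SYNC/VSYNC follow-up command at 1123-1128 are elided):

/* Sketch only; not the literal macro expansion. */
static void its_send_one_command_sketch(struct its_node *its,
					its_cmd_builder_t builder,
					struct its_cmd_desc *desc)
{
	struct its_cmd_block *cmd, *next_cmd;
	unsigned long flags;
	u64 rd_idx;

	raw_spin_lock_irqsave(&its->lock, flags);

	cmd = its_allocate_entry(its);      /* may spin until the ITS drains */
	builder(its, cmd, desc);            /* encode one 32-byte command */
	its_flush_cmd(its, cmd);            /* CMO if the queue is non-coherent */

	rd_idx = readl_relaxed(its->base + GITS_CREADR);
	next_cmd = its_post_commands(its);  /* publish via GITS_CWRITER */
	raw_spin_unlock_irqrestore(&its->lock, flags);

	/* Poll GITS_CREADR until the ITS has consumed up to next_cmd. */
	its_wait_for_range_completion(its, rd_idx, next_cmd);
}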
1140 static void its_build_sync_cmd(struct its_node *its,
1153 static void its_build_vsync_cmd(struct its_node *its,
1173 its_send_single_command(dev->its, its_build_int_cmd, &desc);
1183 its_send_single_command(dev->its, its_build_clear_cmd, &desc);
1193 its_send_single_command(dev->its, its_build_inv_cmd, &desc);
1203 its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
1206 static void its_send_mapc(struct its_node *its, struct its_collection *col,
1214 its_send_single_command(its, its_build_mapc_cmd, &desc);
1225 its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
1237 its_send_single_command(dev->its, its_build_movi_cmd, &desc);
1247 its_send_single_command(dev->its, its_build_discard_cmd, &desc);
1250 static void its_send_invall(struct its_node *its, struct its_collection *col)
1256 its_send_single_command(its, its_build_invall_cmd, &desc);
1270 its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
1283 its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
1286 static void its_send_vmapp(struct its_node *its,
1293 desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
1295 its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
1301 struct its_node *its;
1308 its = list_first_entry(&its_nodes, struct its_node, entry);
1309 desc.its_vmovp_cmd.col = &its->collections[col_id];
1310 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
1328 list_for_each_entry(its, &its_nodes, entry) {
1329 if (!is_v4(its))
1332 if (!require_its_list_vmovp(vpe->its_vm, its))
1335 desc.its_vmovp_cmd.col = &its->collections[col_id];
1336 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
1342 static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
1347 its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
1361 its_send_single_vcommand(dev->its, its_build_vinv_cmd, &desc);
1375 its_send_single_vcommand(dev->its, its_build_vint_cmd, &desc);
1389 its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc);
1392 static void its_send_invdb(struct its_node *its, struct its_vpe *vpe)
1397 its_send_single_vcommand(its, its_build_invdb_cmd, &desc);
1469 WARN_ON(!is_v4_1(its_dev->its));
1487 (is_v4_1(its_dev->its) || !irqd_is_forwarded_to_vcpu(d)))
1505 if (is_v4_1(its_dev->its))
1518 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
1599 node = its_dev->its->numa_node;
1628 if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144))
1646 if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) &&
1684 target_col = &its_dev->its->collections[cpu];
1701 struct its_node *its = its_dev->its;
1703 return its->phys_base + GITS_TRANSLATER;
1709 struct its_node *its;
1712 its = its_dev->its;
1713 addr = its->get_msi_base(its_dev);
1772 static void its_map_vm(struct its_node *its, struct its_vm *vm)
1785 vm->vlpi_count[its->list_nr]++;
1787 if (vm->vlpi_count[its->list_nr] == 1) {
1796 its_send_vmapp(its, vpe, true);
1797 its_send_vinvall(its, vpe);
1805 static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
1815 if (!--vm->vlpi_count[its->list_nr]) {
1819 its_send_vmapp(its, vm->vpes[i], false);
1861 its_map_vm(its_dev->its, info->map->vm);
1934 its_unmap_vm(its_dev->its, its_dev->event_map.vm);
1972 if (!is_v4(its_dev->its))
2292 static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
2294 u32 idx = baser - its->tables;
2296 return gits_read_baser(its->base + GITS_BASER + (idx << 3));
2299 static void its_write_baser(struct its_node *its, struct its_baser *baser,
2302 u32 idx = baser - its->tables;
2304 gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
2305 baser->val = its_read_baser(its, baser);
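The idx << 3 at 2296/2304 works because the eight 64-bit GITS_BASER<n> registers are contiguous, and its_write_baser() reads the value back (2305) because the ITS may clamp fields such as page size or cacheability to what it supports. A trivial standalone check of the addressing, assuming the architectural GITS_BASER<0> offset of 0x0100:

#include <stdio.h>

#define GITS_BASER 0x0100  /* GITS_BASER<0>, per the GICv3 spec (assumed) */

int main(void)
{
	for (unsigned int idx = 0; idx < 8; idx++)
		printf("GITS_BASER%u -> 0x%04x\n", idx, GITS_BASER + (idx << 3));
	return 0;
}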
2308 static int its_setup_baser(struct its_node *its, struct its_baser *baser,
2311 u64 val = its_read_baser(its, baser);
2323 &its->phys_base, its_base_type_string[type],
2329 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
2373 its_write_baser(its, baser, val);
2394 &its->phys_base, its_base_type_string[type],
2406 &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
2415 static bool its_parse_indirect_baser(struct its_node *its,
2419 u64 tmp = its_read_baser(its, baser);
2433 its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
2461 &its->phys_base, its_base_type_string[type],
2462 device_ids(its), ids);
2480 static u32 compute_its_aff(struct its_node *its)
2490 svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer);
2492 val |= FIELD_PREP(GICR_TYPER_AFFINITY, its->mpidr);
2498 struct its_node *its;
2506 list_for_each_entry(its, &its_nodes, entry) {
2509 if (!is_v4_1(its) || its == cur_its)
2512 if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
2515 if (aff != compute_its_aff(its))
2519 baser = its->tables[2].val;
2523 return its;
2529 static void its_free_tables(struct its_node *its)
2534 if (its->tables[i].base) {
2535 free_pages((unsigned long)its->tables[i].base,
2536 its->tables[i].order);
2537 its->tables[i].base = NULL;
2542 static int its_probe_baser_psz(struct its_node *its, struct its_baser *baser)
2549 val = its_read_baser(its, baser);
2568 its_write_baser(its, baser, val);
2590 static int its_alloc_tables(struct its_node *its)
2596 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
2601 struct its_baser *baser = its->tables + i;
2602 u64 val = its_read_baser(its, baser);
2610 if (its_probe_baser_psz(its, baser)) {
2611 its_free_tables(its);
2619 indirect = its_parse_indirect_baser(its, baser, &order,
2620 device_ids(its));
2624 if (is_v4_1(its)) {
2628 if ((sibling = find_sibling_its(its))) {
2630 its_write_baser(its, baser, baser->val);
2635 indirect = its_parse_indirect_baser(its, baser, &order,
2640 err = its_setup_baser(its, baser, cache, shr, order, indirect);
2642 its_free_tables(its);
2656 struct its_node *its;
2663 list_for_each_entry(its, &its_nodes, entry) {
2666 if (!is_v4_1(its))
2669 if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
2672 if (aff != compute_its_aff(its))
2676 baser = its->tables[2].val;
2681 gic_data_rdist()->vpe_l1_base = its->tables[2].base;
2731 * ours wrt CommonLPIAff. Let's use its own VPROPBASER.
2939 static int its_alloc_collections(struct its_node *its)
2943 its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
2945 if (!its->collections)
2949 its->collections[i].target_address = ~0ULL;
3188 static void its_cpu_init_collection(struct its_node *its)
3193 /* avoid cross-node collections and its mapping */
3194 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
3198 if (its->numa_node != NUMA_NO_NODE &&
3199 its->numa_node != of_node_to_nid(cpu_node))
3204 * We now have to bind each collection to its target
3207 if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
3220 its->collections[cpu].target_address = target;
3221 its->collections[cpu].col_id = cpu;
3223 its_send_mapc(its, &its->collections[cpu], 1);
3224 its_send_invall(its, &its->collections[cpu]);
3229 struct its_node *its;
3233 list_for_each_entry(its, &its_nodes, entry)
3234 its_cpu_init_collection(its);
3239 static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
3244 raw_spin_lock_irqsave(&its->lock, flags);
3246 list_for_each_entry(tmp, &its->its_device_list, entry) {
3253 raw_spin_unlock_irqrestore(&its->lock, flags);
3258 static struct its_baser *its_get_baser(struct its_node *its, u32 type)
3263 if (GITS_BASER_TYPE(its->tables[i].val) == type)
3264 return &its->tables[i];
3270 static bool its_alloc_table_entry(struct its_node *its,
3291 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
3313 static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
3317 baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
3321 return (ilog2(dev_id) < device_ids(its));
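The fast path at 3321 handles a flat (non-indirect) device table: ilog2(dev_id) is the index of the DeviceID's highest set bit, so the ID fits exactly when that index is below the implemented DeviceID width. A hypothetical equivalent without ilog2():

#include <stdbool.h>
#include <stdint.h>

/* true iff dev_id needs no more than id_bits bits (illustrative helper) */
static bool dev_id_in_range(uint32_t dev_id, unsigned int id_bits)
{
	return id_bits >= 32 || dev_id < (UINT32_C(1) << id_bits);
}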
3323 return its_alloc_table_entry(its, baser, dev_id);
3328 struct its_node *its;
3338 list_for_each_entry(its, &its_nodes, entry) {
3341 if (!is_v4(its))
3344 baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
3348 if (!its_alloc_table_entry(its, baser, vpe_id))
3368 static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
3381 if (!its_alloc_device_table(its, dev_id))
3393 sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1);
3395 itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
3417 dev->its = its;
3428 raw_spin_lock_irqsave(&its->lock, flags);
3429 list_add(&dev->entry, &its->its_device_list);
3430 raw_spin_unlock_irqrestore(&its->lock, flags);
3432 /* Map device to its ITT */
3442 raw_spin_lock_irqsave(&its_dev->its->lock, flags);
3444 raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
3469 struct its_node *its;
3484 its = msi_info->data;
3488 vpe_proxy.dev->its == its &&
3496 mutex_lock(&its->dev_alloc_lock);
3497 its_dev = its_find_device(its, dev_id);
3509 its_dev = its_create_device(its, dev_id, nvec, true);
3517 mutex_unlock(&its->dev_alloc_lock);
3555 struct its_node *its = its_dev->its;
3565 err = iommu_dma_prepare_msi(info->desc, its->get_msi_base(its_dev));
3623 struct its_node *its = its_dev->its;
3637 mutex_lock(&its->dev_alloc_lock);
3655 mutex_unlock(&its->dev_alloc_lock);
3773 target_col = &vpe_proxy.dev->its->collections[to];
3793 * interrupt to its new location.
3866 * would be able to read its coarse map pretty quickly anyway,
3890 struct its_node *its;
3892 list_for_each_entry(its, &its_nodes, entry) {
3893 if (!is_v4(its))
3896 if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
3903 its_send_vinvall(its, vpe);
4020 static struct its_node *its = NULL;
4022 if (!its) {
4023 list_for_each_entry(its, &its_nodes, entry) {
4024 if (is_v4_1(its))
4025 return its;
4029 its = NULL;
4032 return its;
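This function (find_4_1_its() in mainline, 4020-4032) caches the first GICv4.1-capable node in a function-local static; the reset at 4029 is needed because a failed list_for_each_entry() leaves the cursor pointing at the list head's container rather than at NULL. The kernel-context sketch below gets the same effect with a separate cursor, which sidesteps that reset (illustrative names):

static struct its_node *first_v41_cached;

static struct its_node *first_v41_its(void)
{
	struct its_node *pos;

	if (first_v41_cached)
		return first_v41_cached;

	list_for_each_entry(pos, &its_nodes, entry) {
		if (is_v4_1(pos)) {
			first_v41_cached = pos;
			break;
		}
	}
	/* Still NULL if no GICv4.1 ITS exists; the next call rescans. */
	return first_v41_cached;
}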
4038 struct its_node *its;
4043 * it to the first valid ITS, and let the HW do its magic.
4045 its = find_4_1_its();
4046 if (its)
4047 its_send_invdb(its, vpe);
4227 struct its_node *its = find_4_1_its();
4232 writeq_relaxed(val, its->sgir_base + GITS_SGIR - SZ_128K);
4521 struct its_node *its;
4534 list_for_each_entry(its, &its_nodes, entry) {
4535 if (!is_v4(its))
4538 its_send_vmapp(its, vpe, true);
4539 its_send_vinvall(its, vpe);
4551 struct its_node *its;
4560 list_for_each_entry(its, &its_nodes, entry) {
4561 if (!is_v4(its))
4564 its_send_vmapp(its, vpe, false);
4610 struct its_node *its = data;
4613 its->typer &= ~GITS_TYPER_DEVBITS;
4614 its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, 20 - 1);
4615 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
4622 struct its_node *its = data;
4624 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
4631 struct its_node *its = data;
4634 its->typer &= ~GITS_TYPER_ITT_ENTRY_SIZE;
4635 its->typer |= FIELD_PREP(GITS_TYPER_ITT_ENTRY_SIZE, 16 - 1);
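Both quirk handlers above rewrite the cached its->typer using the hardware's value-minus-one field encoding: the Cavium 22375 workaround stores 20 - 1 Devbits (4614), and the second quirk (QDF2400 erratum 0065 in mainline) stores 16 - 1 for the ITT entry size (4635). The matching +1 reappears at 3393 when the ITT is sized. A standalone round-trip of that encoding, with an assumed [7:4] field position:

#include <stdint.h>
#include <stdio.h>

#define ITT_ENTRY_SIZE_SHIFT 4
#define ITT_ENTRY_SIZE_MASK  (UINT64_C(0xf) << ITT_ENTRY_SIZE_SHIFT)

int main(void)
{
	uint64_t typer = (uint64_t)(8 - 1) << ITT_ENTRY_SIZE_SHIFT; /* HW says 8 bytes */
	unsigned int nr_ites = 32;

	/* Quirk: pretend ITT entries are 16 bytes wide. */
	typer &= ~ITT_ENTRY_SIZE_MASK;
	typer |= (uint64_t)(16 - 1) << ITT_ENTRY_SIZE_SHIFT;

	/* Mirrors the ITT sizing at 3393: (field + 1) bytes per entry. */
	unsigned int esz = (unsigned int)
		(((typer & ITT_ENTRY_SIZE_MASK) >> ITT_ENTRY_SIZE_SHIFT) + 1);
	printf("ITT: %u entries x %u bytes = %u bytes\n",
	       nr_ites, esz, nr_ites * esz);
	return 0;
}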
4642 struct its_node *its = its_dev->its;
4651 return its->pre_its_base + (its_dev->device_id << 2);
4656 struct its_node *its = data;
4660 if (!fwnode_property_read_u32_array(its->fwnode_handle,
4661 "socionext,synquacer-pre-its",
4665 its->pre_its_base = pre_its_window[0];
4666 its->get_msi_base = its_irq_get_msi_base_pre_its;
4669 if (device_ids(its) > ids) {
4670 its->typer &= ~GITS_TYPER_DEVBITS;
4671 its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, ids - 1);
4675 its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP;
4683 struct its_node *its = data;
4689 its->vlpi_redist_offset = SZ_128K;
4743 static void its_enable_quirks(struct its_node *its)
4745 u32 iidr = readl_relaxed(its->base + GITS_IIDR);
4747 gic_enable_quirks(iidr, its_quirks, its);
4752 struct its_node *its;
4756 list_for_each_entry(its, &its_nodes, entry) {
4759 base = its->base;
4760 its->ctlr_save = readl_relaxed(base + GITS_CTLR);
4764 &its->phys_base, err);
4765 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
4769 its->cbaser_save = gits_read_cbaser(base + GITS_CBASER);
4774 list_for_each_entry_continue_reverse(its, &its_nodes, entry) {
4777 base = its->base;
4778 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
4788 struct its_node *its;
4792 list_for_each_entry(its, &its_nodes, entry) {
4796 base = its->base;
4810 &its->phys_base, ret);
4814 gits_write_cbaser(its->cbaser_save, base + GITS_CBASER);
4820 its->cmd_write = its->cmd_base;
4825 struct its_baser *baser = &its->tables[i];
4830 its_write_baser(its, baser, baser->val);
4832 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
4839 if (its->collections[smp_processor_id()].col_id <
4841 its_cpu_init_collection(its);
4851 static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
4860 inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its);
4868 inner_domain->flags |= its->msi_domain_flags;
4870 info->data = its;
4878 struct its_node *its;
4888 its = list_first_entry(&its_nodes, struct its_node, entry);
4899 devid = GENMASK(device_ids(its) - 1, 0);
4900 vpe_proxy.dev = its_create_device(its, devid, entries, false);
4958 struct its_node *its;
4986 its = kzalloc(sizeof(*its), GFP_KERNEL);
4987 if (!its) {
4992 raw_spin_lock_init(&its->lock);
4993 mutex_init(&its->dev_alloc_lock);
4994 INIT_LIST_HEAD(&its->entry);
4995 INIT_LIST_HEAD(&its->its_device_list);
4997 its->typer = typer;
4998 its->base = its_base;
4999 its->phys_base = res->start;
5000 if (is_v4(its)) {
5006 its->list_nr = err;
5014 if (is_v4_1(its)) {
5017 its->sgir_base = ioremap(res->start + SZ_128K, SZ_64K);
5018 if (!its->sgir_base) {
5023 its->mpidr = readl_relaxed(its_base + GITS_MPIDR);
5026 &res->start, its->mpidr, svpet);
5030 its->numa_node = numa_node;
5032 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
5038 its->cmd_base = (void *)page_address(page);
5039 its->cmd_write = its->cmd_base;
5040 its->fwnode_handle = handle;
5041 its->get_msi_base = its_irq_get_msi_base;
5042 its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP;
5044 its_enable_quirks(its);
5046 err = its_alloc_tables(its);
5050 err = its_alloc_collections(its);
5054 baser = (virt_to_phys(its->cmd_base) |
5060 gits_write_cbaser(baser, its->base + GITS_CBASER);
5061 tmp = gits_read_cbaser(its->base + GITS_CBASER);
5073 gits_write_cbaser(baser, its->base + GITS_CBASER);
5076 its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
5079 gits_write_cwriter(0, its->base + GITS_CWRITER);
5080 ctlr = readl_relaxed(its->base + GITS_CTLR);
5082 if (is_v4(its))
5084 writel_relaxed(ctlr, its->base + GITS_CTLR);
5086 err = its_init_domain(handle, its);
5091 list_add(&its->entry, &its_nodes);
5097 its_free_tables(its);
5099 free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
5101 if (its->sgir_base)
5102 iounmap(its->sgir_base);
5104 kfree(its);
5201 { .compatible = "arm,gic-v3-its", },
5389 struct its_node *its;
5412 list_for_each_entry(its, &its_nodes, entry) {
5413 has_v4 |= is_v4(its);
5414 has_v4_1 |= is_v4_1(its);