Lines matching defs:its (KVM vGIC ITS emulation, vgic-its.c)
26 static int vgic_its_save_tables_v0(struct vgic_its *its);
27 static int vgic_its_restore_tables_v0(struct vgic_its *its);
28 static int vgic_its_commit_v0(struct vgic_its *its);
34 * If this LPI is already mapped on another ITS, we increase its refcount
171 int (*save_tables)(struct vgic_its *its);
172 int (*restore_tables)(struct vgic_its *its);
173 int (*commit)(struct vgic_its *its);
192 inline const struct vgic_its_abi *vgic_its_get_abi(struct vgic_its *its)
194 return &its_table_abi_versions[its->abi_rev];
197 static int vgic_its_set_abi(struct vgic_its *its, u32 rev)
201 its->abi_rev = rev;
202 abi = vgic_its_get_abi(its);
203 return abi->commit(its);
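The definitions at lines 171-203 model the ITS table ABI as an array of descriptors indexed by its->abi_rev, each carrying save_tables/restore_tables/commit callbacks; vgic_its_set_abi() records the new revision and immediately runs that revision's commit() hook. A minimal user-space sketch of the ops-table pattern follows; the struct layout, helper names and the trivial commit body are illustrative stand-ins, not the kernel's.

  #include <stdio.h>

  struct vgic_its { unsigned int abi_rev; };

  struct vgic_its_abi {
      int (*save_tables)(struct vgic_its *its);
      int (*restore_tables)(struct vgic_its *its);
      int (*commit)(struct vgic_its *its);
  };

  static int commit_v0(struct vgic_its *its)
  {
      printf("committing ABI rev %u\n", its->abi_rev);
      return 0;
  }

  /* one descriptor per supported ABI revision, indexed by abi_rev */
  static const struct vgic_its_abi abi_versions[] = {
      { .commit = commit_v0 },    /* save/restore not exercised in this sketch */
  };

  static const struct vgic_its_abi *get_abi(struct vgic_its *its)
  {
      return &abi_versions[its->abi_rev];
  }

  static int set_abi(struct vgic_its *its, unsigned int rev)
  {
      its->abi_rev = rev;
      /* switching revisions re-commits state under the new ABI */
      return get_abi(its)->commit(its);
  }

  int main(void)
  {
      struct vgic_its its = { 0 };

      return set_abi(&its, 0);
  }

Adding a new ABI revision then amounts to appending a descriptor to the array, which is why line 1931 initialises a freshly created ITS with NR_ITS_ABIS - 1, the latest revision.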
210 static struct its_device *find_its_device(struct vgic_its *its, u32 device_id)
214 list_for_each_entry(device, &its->device_list, dev_list)
226 static struct its_ite *find_ite(struct vgic_its *its, u32 device_id,
232 device = find_its_device(its, device_id);
244 #define for_each_lpi_its(dev, ite, its) \
245 list_for_each_entry(dev, &(its)->device_list, dev_list) \
259 static struct its_collection *find_collection(struct vgic_its *its, int coll_id)
263 list_for_each_entry(collection, &its->collection_list, coll_list) {
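Lines 210-263 are the lookup helpers: a device is found by walking its->device_list, an ITE by first locating its device and then walking that device's ITT, and a collection by walking its->collection_list. A sketch of the same two-level lookup, using plain next pointers instead of the kernel's intrusive list_head (all type and field names here are simplified stand-ins):

  #include <stdio.h>

  struct its_ite { unsigned int event_id; struct its_ite *next; };
  struct its_device { unsigned int device_id; struct its_ite *itt; struct its_device *next; };
  struct vgic_its { struct its_device *devices; };

  static struct its_device *find_its_device(struct vgic_its *its, unsigned int device_id)
  {
      for (struct its_device *dev = its->devices; dev; dev = dev->next)
          if (dev->device_id == device_id)
              return dev;
      return NULL;
  }

  /* (device_id, event_id) names exactly one interrupt translation entry */
  static struct its_ite *find_ite(struct vgic_its *its, unsigned int device_id,
                                  unsigned int event_id)
  {
      struct its_device *dev = find_its_device(its, device_id);

      if (!dev)
          return NULL;
      for (struct its_ite *ite = dev->itt; ite; ite = ite->next)
          if (ite->event_id == event_id)
              return ite;
      return NULL;
  }

  int main(void)
  {
      struct its_ite ite = { .event_id = 3 };
      struct its_device dev = { .device_id = 1, .itt = &ite };
      struct vgic_its its = { .devices = &dev };

      printf("found: %d\n", find_ite(&its, 1, 3) != NULL);
      return 0;
  }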
402 static void update_affinity_collection(struct kvm *kvm, struct vgic_its *its,
408 for_each_lpi_its(device, ite, its) {
477 struct vgic_its *its,
480 const struct vgic_its_abi *abi = vgic_its_get_abi(its);
499 struct vgic_its *its,
504 val = (its->abi_rev << GITS_IIDR_REV_SHIFT) & GITS_IIDR_REV_MASK;
510 struct vgic_its *its,
518 return vgic_its_set_abi(its, rev);
522 struct vgic_its *its,
597 static void vgic_its_cache_translation(struct kvm *kvm, struct vgic_its *its,
620 db = its->vgic_its_base + GITS_TRANSLATER;
673 int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
679 if (!its->enabled)
682 ite = find_ite(its, devid, eventid);
693 vgic_its_cache_translation(kvm, its, devid, eventid, ite->irq);
724 return iodev->its;
734 static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
741 err = vgic_its_resolve_lpi(kvm, its, devid, eventid, &irq);
783 struct vgic_its *its;
789 its = vgic_msi_to_its(kvm, msi);
790 if (IS_ERR(its))
791 return PTR_ERR(its);
793 mutex_lock(&its->its_lock);
794 ret = vgic_its_trigger_msi(kvm, its, msi->devid, msi->data);
795 mutex_unlock(&its->its_lock);
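Lines 673-795 form the MSI injection path: vgic_msi_to_its() maps the MSI doorbell address to an ITS, vgic_its_inject_msi() takes its_lock, and vgic_its_trigger_msi() resolves (devid, eventid) to an LPI via vgic_its_resolve_lpi(), which bails out when the ITS is disabled or no ITE matches. A compact sketch of the resolve-and-inject part of that flow, with a single hard-coded ITE standing in for the real ITT and the lock folded into the trigger helper (names and types are illustrative):

  #include <pthread.h>
  #include <stdio.h>

  struct vgic_irq { unsigned int intid; };
  struct its_ite { unsigned int devid, eventid; struct vgic_irq *irq; };

  struct vgic_its {
      int enabled;
      pthread_mutex_t its_lock;
      struct its_ite *ite;            /* single-entry "ITT" for the sketch */
  };

  static struct its_ite *find_ite(struct vgic_its *its, unsigned int devid,
                                  unsigned int eventid)
  {
      if (its->ite && its->ite->devid == devid && its->ite->eventid == eventid)
          return its->ite;
      return NULL;
  }

  static int resolve_lpi(struct vgic_its *its, unsigned int devid,
                         unsigned int eventid, struct vgic_irq **irq)
  {
      struct its_ite *ite;

      if (!its->enabled)
          return -1;                  /* translations are off */
      ite = find_ite(its, devid, eventid);
      if (!ite)
          return -1;
      *irq = ite->irq;
      return 0;
  }

  /* the write to GITS_TRANSLATER ends up here, serialized by its_lock */
  static int trigger_msi(struct vgic_its *its, unsigned int devid, unsigned int eventid)
  {
      struct vgic_irq *irq;
      int ret;

      pthread_mutex_lock(&its->its_lock);
      ret = resolve_lpi(its, devid, eventid, &irq);
      if (!ret)
          printf("inject LPI %u\n", irq->intid);
      pthread_mutex_unlock(&its->its_lock);
      return ret;
  }

  int main(void)
  {
      struct vgic_irq lpi = { .intid = 8192 };    /* 8192 is the first LPI INTID */
      struct its_ite ite = { .devid = 1, .eventid = 3, .irq = &lpi };
      struct vgic_its its = { .enabled = 1, .ite = &ite };

      pthread_mutex_init(&its.its_lock, NULL);
      return trigger_msi(&its, 1, 3);
  }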
846 static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its,
853 ite = find_ite(its, device_id, event_id);
873 static int vgic_its_cmd_handle_movi(struct kvm *kvm, struct vgic_its *its,
883 ite = find_ite(its, device_id, event_id);
890 collection = find_collection(its, coll_id);
909 static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
955 if (kvm_read_guest_lock(its->dev->kvm,
978 idx = srcu_read_lock(&its->dev->kvm->srcu);
979 ret = kvm_is_visible_gfn(its->dev->kvm, gfn);
980 srcu_read_unlock(&its->dev->kvm->srcu, idx);
984 static int vgic_its_alloc_collection(struct vgic_its *its,
990 if (!vgic_its_check_id(its, its->baser_coll_table, coll_id, NULL))
1000 list_add_tail(&collection->coll_list, &its->collection_list);
1006 static void vgic_its_free_collection(struct vgic_its *its, u32 coll_id)
1017 collection = find_collection(its, coll_id);
1021 for_each_lpi_its(device, ite, its)
1052 static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
1065 device = find_its_device(its, device_id);
1081 if (find_ite(its, device_id, event_id))
1084 collection = find_collection(its, coll_id);
1086 int ret = vgic_its_alloc_collection(its, &collection, coll_id);
1095 vgic_its_free_collection(its, coll_id);
1105 vgic_its_free_collection(its, coll_id);
1133 /* its lock must be held */
1134 static void vgic_its_free_device_list(struct kvm *kvm, struct vgic_its *its)
1138 list_for_each_entry_safe(cur, temp, &its->device_list, dev_list)
1142 /* its lock must be held */
1143 static void vgic_its_free_collection_list(struct kvm *kvm, struct vgic_its *its)
1147 list_for_each_entry_safe(cur, temp, &its->collection_list, coll_list)
1148 vgic_its_free_collection(its, cur->collection_id);
1152 static struct its_device *vgic_its_alloc_device(struct vgic_its *its,
1167 list_add_tail(&device->dev_list, &its->device_list);
1175 static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
1184 if (!vgic_its_check_id(its, its->baser_device_table, device_id, NULL))
1190 device = find_its_device(its, device_id);
1207 device = vgic_its_alloc_device(its, device_id, itt_addr,
1217 static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its,
1233 vgic_its_free_collection(its, coll_id);
1236 collection = find_collection(its, coll_id);
1241 ret = vgic_its_alloc_collection(its, &collection,
1248 update_affinity_collection(kvm, its, collection);
1259 static int vgic_its_cmd_handle_clear(struct kvm *kvm, struct vgic_its *its,
1267 ite = find_ite(its, device_id, event_id);
1284 static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its,
1292 ite = find_ite(its, device_id, event_id);
1307 static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
1317 collection = find_collection(its, coll_id);
1351 static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
1393 static int vgic_its_cmd_handle_int(struct kvm *kvm, struct vgic_its *its,
1399 return vgic_its_trigger_msi(kvm, its, msi_devid, msi_data);
1406 static int vgic_its_handle_command(struct kvm *kvm, struct vgic_its *its,
1411 mutex_lock(&its->its_lock);
1414 ret = vgic_its_cmd_handle_mapd(kvm, its, its_cmd);
1417 ret = vgic_its_cmd_handle_mapc(kvm, its, its_cmd);
1420 ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
1423 ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
1426 ret = vgic_its_cmd_handle_movi(kvm, its, its_cmd);
1429 ret = vgic_its_cmd_handle_discard(kvm, its, its_cmd);
1432 ret = vgic_its_cmd_handle_clear(kvm, its, its_cmd);
1435 ret = vgic_its_cmd_handle_movall(kvm, its, its_cmd);
1438 ret = vgic_its_cmd_handle_int(kvm, its, its_cmd);
1441 ret = vgic_its_cmd_handle_inv(kvm, its, its_cmd);
1444 ret = vgic_its_cmd_handle_invall(kvm, its, its_cmd);
1451 mutex_unlock(&its->its_lock);
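Lines 1406-1451 show vgic_its_handle_command() taking its_lock and dispatching on the command opcode to the MAPD/MAPC/MAPI/MAPTI/MOVI/DISCARD/CLEAR/MOVALL/INT/INV/INVALL handlers. The sketch below reproduces the shape of that dispatch; the handler bodies are empty stubs and only a few opcodes are wired up (opcode values follow the GICv3 ITS command encoding, where the opcode sits in bits [7:0] of the first 64-bit word of a 32-byte command):

  #include <stdint.h>
  #include <stdio.h>
  #include <pthread.h>

  /* a command is four 64-bit words; the opcode is bits [7:0] of word 0 */
  #define CMD_ID(cmd)      ((cmd)[0] & 0xff)

  /* opcode values from the GICv3 ITS command encoding */
  #define GITS_CMD_INT     0x03
  #define GITS_CMD_MAPD    0x08
  #define GITS_CMD_MAPC    0x09
  #define GITS_CMD_MAPTI   0x0a

  struct vgic_its { pthread_mutex_t its_lock; };

  static int handle_mapd(struct vgic_its *its, uint64_t *cmd)  { (void)its; (void)cmd; return 0; }
  static int handle_mapc(struct vgic_its *its, uint64_t *cmd)  { (void)its; (void)cmd; return 0; }
  static int handle_mapti(struct vgic_its *its, uint64_t *cmd) { (void)its; (void)cmd; return 0; }
  static int handle_int(struct vgic_its *its, uint64_t *cmd)   { (void)its; (void)cmd; return 0; }

  static int handle_command(struct vgic_its *its, uint64_t *cmd)
  {
      int ret = -1;                   /* unknown commands fall through unhandled */

      pthread_mutex_lock(&its->its_lock);
      switch (CMD_ID(cmd)) {
      case GITS_CMD_MAPD:  ret = handle_mapd(its, cmd);  break;
      case GITS_CMD_MAPC:  ret = handle_mapc(its, cmd);  break;
      case GITS_CMD_MAPTI: ret = handle_mapti(its, cmd); break;
      case GITS_CMD_INT:   ret = handle_int(its, cmd);   break;
      }
      pthread_mutex_unlock(&its->its_lock);
      return ret;
  }

  int main(void)
  {
      struct vgic_its its;
      uint64_t cmd[4] = { GITS_CMD_MAPD, 0, 0, 0 };

      pthread_mutex_init(&its.its_lock, NULL);
      printf("MAPD handled: %d\n", handle_command(&its, cmd));
      return 0;
  }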
1493 struct vgic_its *its,
1496 return extract_bytes(its->cbaser, addr & 7, len);
1499 static void vgic_mmio_write_its_cbaser(struct kvm *kvm, struct vgic_its *its,
1504 if (its->enabled)
1507 mutex_lock(&its->cmd_lock);
1508 its->cbaser = update_64bit_reg(its->cbaser, addr & 7, len, val);
1509 its->cbaser = vgic_sanitise_its_cbaser(its->cbaser);
1510 its->creadr = 0;
1515 its->cwriter = its->creadr;
1516 mutex_unlock(&its->cmd_lock);
1524 static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
1530 if (!its->enabled)
1533 cbaser = GITS_CBASER_ADDRESS(its->cbaser);
1535 while (its->cwriter != its->creadr) {
1536 int ret = kvm_read_guest_lock(kvm, cbaser + its->creadr,
1546 vgic_its_handle_command(kvm, its, cmd_buf);
1548 its->creadr += ITS_CMD_SIZE;
1549 if (its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser))
1550 its->creadr = 0;
1560 static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
1566 if (!its)
1569 mutex_lock(&its->cmd_lock);
1571 reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
1573 if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
1574 mutex_unlock(&its->cmd_lock);
1577 its->cwriter = reg;
1579 vgic_its_process_commands(kvm, its);
1581 mutex_unlock(&its->cmd_lock);
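Lines 1524-1581 implement the command queue: a guest write to GITS_CWRITER updates its->cwriter under cmd_lock and then vgic_its_process_commands() drains the ring, reading one ITS_CMD_SIZE (32-byte) command at a time from CBASER + CREADR, handling it, and wrapping CREADR at the end of the buffer. A sketch of that consumer loop over an in-memory ring standing in for guest memory (the buffer size and the sample opcode are arbitrary example data):

  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  #define ITS_CMD_SIZE     32
  #define CMD_BUFFER_SIZE  (4 * ITS_CMD_SIZE)     /* tiny ring for the sketch */

  struct vgic_its {
      uint8_t cmd_buf[CMD_BUFFER_SIZE];   /* stands in for guest memory at CBASER */
      uint64_t creadr;                    /* consumer offset (GITS_CREADR) */
      uint64_t cwriter;                   /* producer offset (GITS_CWRITER) */
      int enabled;
  };

  static void handle_command(uint64_t *cmd)
  {
      printf("opcode 0x%02llx\n", (unsigned long long)(cmd[0] & 0xff));
  }

  static void process_commands(struct vgic_its *its)
  {
      uint64_t cmd[ITS_CMD_SIZE / 8];

      if (!its->enabled)
          return;

      while (its->cwriter != its->creadr) {
          /* the kernel does a guest read here; we just copy from the ring */
          memcpy(cmd, its->cmd_buf + its->creadr, ITS_CMD_SIZE);
          handle_command(cmd);

          its->creadr += ITS_CMD_SIZE;
          if (its->creadr == CMD_BUFFER_SIZE)
              its->creadr = 0;        /* wrap around */
      }
  }

  int main(void)
  {
      struct vgic_its its = { .enabled = 1 };
      uint64_t mapd[4] = { 0x08, 0, 0, 0 };   /* one queued command */

      memcpy(its.cmd_buf, mapd, sizeof(mapd));
      its.cwriter = ITS_CMD_SIZE;
      process_commands(&its);
      return 0;
  }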
1585 struct vgic_its *its,
1588 return extract_bytes(its->cwriter, addr & 0x7, len);
1592 struct vgic_its *its,
1595 return extract_bytes(its->creadr, addr & 0x7, len);
1599 struct vgic_its *its,
1606 mutex_lock(&its->cmd_lock);
1608 if (its->enabled) {
1614 if (cmd_offset >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
1619 its->creadr = cmd_offset;
1621 mutex_unlock(&its->cmd_lock);
1627 struct vgic_its *its,
1634 reg = its->baser_device_table;
1637 reg = its->baser_coll_table;
1649 struct vgic_its *its,
1653 const struct vgic_its_abi *abi = vgic_its_get_abi(its);
1658 if (its->enabled)
1663 regptr = &its->baser_device_table;
1668 regptr = &its->baser_coll_table;
1689 mutex_lock(&its->its_lock);
1692 vgic_its_free_device_list(kvm, its);
1695 vgic_its_free_collection_list(kvm, its);
1698 mutex_unlock(&its->its_lock);
1703 struct vgic_its *its,
1708 mutex_lock(&its->cmd_lock);
1709 if (its->creadr == its->cwriter)
1711 if (its->enabled)
1713 mutex_unlock(&its->cmd_lock);
1718 static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
1722 mutex_lock(&its->cmd_lock);
1728 if (!its->enabled && (val & GITS_CTLR_ENABLE) &&
1729 (!(its->baser_device_table & GITS_BASER_VALID) ||
1730 !(its->baser_coll_table & GITS_BASER_VALID) ||
1731 !(its->cbaser & GITS_CBASER_VALID)))
1734 its->enabled = !!(val & GITS_CTLR_ENABLE);
1735 if (!its->enabled)
1742 vgic_its_process_commands(kvm, its);
1745 mutex_unlock(&its->cmd_lock);
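Lines 1718-1745 gate GITS_CTLR: the enable bit is refused while the device table, collection table or CBASER shadow registers lack their valid bit, and a successful enable drains whatever commands the guest queued while the ITS was off. A small sketch of that validity check, with the register layout reduced to a single valid bit (bit 63, as in GITS_BASER/GITS_CBASER) and local stand-in names:

  #include <stdint.h>
  #include <stdbool.h>
  #include <stdio.h>

  #define GITS_CTLR_ENABLE   (1ULL << 0)
  #define BASER_VALID        (1ULL << 63)

  struct vgic_its {
      bool enabled;
      uint64_t baser_device_table, baser_coll_table, cbaser;
  };

  static void write_ctlr(struct vgic_its *its, uint64_t val)
  {
      /* refuse to enable until all backing tables have been marked valid */
      if (!its->enabled && (val & GITS_CTLR_ENABLE) &&
          (!(its->baser_device_table & BASER_VALID) ||
           !(its->baser_coll_table & BASER_VALID) ||
           !(its->cbaser & BASER_VALID)))
          return;

      its->enabled = val & GITS_CTLR_ENABLE;
      /* on a real enable, commands queued while disabled would be drained here */
  }

  int main(void)
  {
      struct vgic_its its = { 0 };

      write_ctlr(&its, GITS_CTLR_ENABLE);
      printf("enabled without tables: %d\n", its.enabled);   /* 0 */

      its.baser_device_table = its.baser_coll_table = its.cbaser = BASER_VALID;
      write_ctlr(&its, GITS_CTLR_ENABLE);
      printf("enabled with tables:    %d\n", its.enabled);    /* 1 */
      return 0;
  }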
1767 static void its_mmio_write_wi(struct kvm *kvm, struct vgic_its *its,
1809 static int vgic_register_its_iodev(struct kvm *kvm, struct vgic_its *its,
1812 struct vgic_io_device *iodev = &its->iodev;
1816 if (!IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
1821 its->vgic_its_base = addr;
1826 iodev->base_addr = its->vgic_its_base;
1828 iodev->its = its;
1891 struct vgic_its *its;
1896 its = kzalloc(sizeof(struct vgic_its), GFP_KERNEL);
1897 if (!its)
1903 kfree(its);
1910 mutex_init(&its->its_lock);
1911 mutex_init(&its->cmd_lock);
1913 its->vgic_its_base = VGIC_ADDR_UNDEF;
1915 INIT_LIST_HEAD(&its->device_list);
1916 INIT_LIST_HEAD(&its->collection_list);
1920 its->enabled = false;
1921 its->dev = dev;
1923 its->baser_device_table = INITIAL_BASER_VALUE |
1925 its->baser_coll_table = INITIAL_BASER_VALUE |
1929 dev->private = its;
1931 return vgic_its_set_abi(its, NR_ITS_ABIS - 1);
1937 struct vgic_its *its = kvm_dev->private;
1939 mutex_lock(&its->its_lock);
1941 vgic_its_free_device_list(kvm, its);
1942 vgic_its_free_collection_list(kvm, its);
1944 mutex_unlock(&its->its_lock);
1945 kfree(its);
1975 struct vgic_its *its;
1980 its = dev->private;
2000 if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
2018 addr = its->vgic_its_base + offset;
2024 ret = region->uaccess_its_write(dev->kvm, its, addr,
2027 region->its_write(dev->kvm, its, addr, len, *reg);
2029 *reg = region->its_read(dev->kvm, its, addr, len);
2066 * @its: its handle
2074 typedef int (*entry_fn_t)(struct vgic_its *its, u32 id, void *entry,
2081 * @its: its handle
2092 static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz,
2095 struct kvm *kvm = its->dev->kvm;
2112 next_offset = fn(its, id, entry, opaque);
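Lines 2066-2112 define the generic table walker used by the save/restore code: scan_its_table() reads fixed-size entries from guest memory and passes each one to an entry_fn_t callback, whose return value is the number of entries to skip to reach the next one, 0 for "last entry", or a negative error. A user-space sketch of that callback-driven scan over an in-memory table; the callback here just prints entries and treats a zero entry as the end, whereas the real DTE/ITE callbacks decode the entry instead:

  #include <stdint.h>
  #include <stdio.h>

  /* returns > 0: entries to skip to reach the next one to visit,
   *         0: last entry reached, stop successfully,
   *       < 0: error */
  typedef int (*entry_fn_t)(uint32_t id, void *entry, void *opaque);

  static int scan_table(uint8_t *base, int size, int esz,
                        entry_fn_t fn, void *opaque)
  {
      uint32_t id = 0;
      int offset = 0;

      while (offset < size) {
          int next = fn(id, base + offset, opaque);

          if (next <= 0)
              return next;
          id += next;
          offset += next * esz;
      }
      return 1;     /* walked the whole table without hitting a "last entry" */
  }

  /* example callback: print 8-byte entries, stop at a zero entry */
  static int print_entry(uint32_t id, void *entry, void *opaque)
  {
      uint64_t val = *(uint64_t *)entry;

      (void)opaque;
      if (!val)
          return 0;
      printf("entry %u = 0x%llx\n", (unsigned)id, (unsigned long long)val);
      return 1;     /* entries are contiguous here; real DTEs encode a skip */
  }

  int main(void)
  {
      uint64_t table[4] = { 0x11, 0x22, 0x33, 0 };

      return scan_table((uint8_t *)table, sizeof(table), 8, print_entry, NULL);
  }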
2130 static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
2133 struct kvm *kvm = its->dev->kvm;
2151 static int vgic_its_restore_ite(struct vgic_its *its, u32 event_id,
2156 struct kvm *kvm = its->dev->kvm;
2182 collection = find_collection(its, coll_id);
2213 static int vgic_its_save_itt(struct vgic_its *its, struct its_device *device)
2215 const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2235 ret = vgic_its_save_ite(its, device, ite, gpa, ite_esz);
2245 * @its: its handle
2250 static int vgic_its_restore_itt(struct vgic_its *its, struct its_device *dev)
2252 const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2258 ret = scan_its_table(its, base, max_size, ite_esz, 0,
2271 * @its: ITS handle
2275 static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
2278 struct kvm *kvm = its->dev->kvm;
2283 next_offset = compute_next_devid_offset(&its->device_list, dev);
2295 * @its: its handle
2303 static int vgic_its_restore_dte(struct vgic_its *its, u32 id,
2327 dev = vgic_its_alloc_device(its, id, itt_addr, num_eventid_bits);
2331 ret = vgic_its_restore_itt(its, dev);
2333 vgic_its_free_device(its->dev->kvm, dev);
2359 static int vgic_its_save_device_tables(struct vgic_its *its)
2361 const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2362 u64 baser = its->baser_device_table;
2369 list_sort(NULL, &its->device_list, vgic_its_device_cmp);
2371 list_for_each_entry(dev, &its->device_list, dev_list) {
2375 if (!vgic_its_check_id(its, baser,
2379 ret = vgic_its_save_itt(its, dev);
2383 ret = vgic_its_save_dte(its, dev, eaddr, dte_esz);
2393 * @its: its handle
2402 static int handle_l1_dte(struct vgic_its *its, u32 id, void *addr,
2405 const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2419 ret = scan_its_table(its, gpa, SZ_64K, dte_esz,
2429 static int vgic_its_restore_device_tables(struct vgic_its *its)
2431 const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2432 u64 baser = its->baser_device_table;
2444 ret = scan_its_table(its, l1_gpa, l1_tbl_size, l1_esz, 0,
2448 ret = scan_its_table(its, l1_gpa, l1_tbl_size, l1_esz, 0,
2459 static int vgic_its_save_cte(struct vgic_its *its,
2469 return kvm_write_guest_lock(its->dev->kvm, gpa, &val, esz);
2472 static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
2475 struct kvm *kvm = its->dev->kvm;
2495 collection = find_collection(its, coll_id);
2498 ret = vgic_its_alloc_collection(its, &collection, coll_id);
2509 static int vgic_its_save_collection_table(struct vgic_its *its)
2511 const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2512 u64 baser = its->baser_coll_table;
2524 list_for_each_entry(collection, &its->collection_list, coll_list) {
2525 ret = vgic_its_save_cte(its, collection, gpa, cte_esz);
2541 ret = kvm_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz);
2550 static int vgic_its_restore_collection_table(struct vgic_its *its)
2552 const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2553 u64 baser = its->baser_coll_table;
2567 ret = vgic_its_restore_cte(its, gpa, cte_esz);
2584 static int vgic_its_save_tables_v0(struct vgic_its *its)
2588 ret = vgic_its_save_device_tables(its);
2592 return vgic_its_save_collection_table(its);
2600 static int vgic_its_restore_tables_v0(struct vgic_its *its)
2604 ret = vgic_its_restore_collection_table(its);
2608 return vgic_its_restore_device_tables(its);
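Lines 2584-2608 fix the v0 save/restore ordering: saving writes the device tables (with their ITTs) before the collection table, while restoring goes the other way round, since a restored ITE looks its collection up by ID (line 2182) and therefore needs the collection table in place first. A trivial sketch of that orchestration, with print-only stubs standing in for the real table writers:

  #include <stdio.h>

  struct vgic_its { int unused; };

  static int save_device_tables(struct vgic_its *its)       { (void)its; puts("save DTs/ITTs");    return 0; }
  static int save_collection_table(struct vgic_its *its)    { (void)its; puts("save CT");          return 0; }
  static int restore_collection_table(struct vgic_its *its) { (void)its; puts("restore CT");       return 0; }
  static int restore_device_tables(struct vgic_its *its)    { (void)its; puts("restore DTs/ITTs"); return 0; }

  static int save_tables_v0(struct vgic_its *its)
  {
      int ret = save_device_tables(its);

      if (ret)
          return ret;
      return save_collection_table(its);
  }

  /* restore collections first: restored ITEs look their collection up by ID */
  static int restore_tables_v0(struct vgic_its *its)
  {
      int ret = restore_collection_table(its);

      if (ret)
          return ret;
      return restore_device_tables(its);
  }

  int main(void)
  {
      struct vgic_its its = { 0 };
      int ret = save_tables_v0(&its);

      if (!ret)
          ret = restore_tables_v0(&its);
      return ret;
  }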
2611 static int vgic_its_commit_v0(struct vgic_its *its)
2615 abi = vgic_its_get_abi(its);
2616 its->baser_coll_table &= ~GITS_BASER_ENTRY_SIZE_MASK;
2617 its->baser_device_table &= ~GITS_BASER_ENTRY_SIZE_MASK;
2619 its->baser_coll_table |= (GIC_ENCODE_SZ(abi->cte_esz, 5)
2622 its->baser_device_table |= (GIC_ENCODE_SZ(abi->dte_esz, 5)
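Lines 2611-2622 show vgic_its_commit_v0() re-encoding the ABI's entry sizes into the GITS_BASER shadow registers: the ENTRY_SIZE field is cleared and the encoded size written back. A sketch of that read-modify-write, assuming the GITS_BASER layout where Entry_Size occupies bits [52:48] and holds the byte size minus one (the macro names are local stand-ins for the kernel's GITS_BASER_* and GIC_ENCODE_SZ):

  #include <stdint.h>
  #include <stdio.h>

  /* GITS_BASER.Entry_Size: bits [52:48], holds (entry size in bytes) - 1 */
  #define ENTRY_SIZE_SHIFT   48
  #define ENTRY_SIZE_MASK    (0x1fULL << ENTRY_SIZE_SHIFT)
  #define ENCODE_SZ(n)       (((uint64_t)(n) - 1) & 0x1f)

  static uint64_t set_entry_size(uint64_t baser, unsigned int esz)
  {
      baser &= ~ENTRY_SIZE_MASK;                      /* clear the old field */
      baser |= ENCODE_SZ(esz) << ENTRY_SIZE_SHIFT;    /* write the new size  */
      return baser;
  }

  int main(void)
  {
      uint64_t baser = 0;

      baser = set_entry_size(baser, 8);               /* e.g. 8-byte table entries */
      printf("GITS_BASER entry size: %llu bytes\n",
             (unsigned long long)((baser & ENTRY_SIZE_MASK) >> ENTRY_SIZE_SHIFT) + 1);
      return 0;
  }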
2627 static void vgic_its_reset(struct kvm *kvm, struct vgic_its *its)
2630 its->baser_coll_table &= ~GITS_BASER_VALID;
2631 its->baser_device_table &= ~GITS_BASER_VALID;
2632 its->cbaser = 0;
2633 its->creadr = 0;
2634 its->cwriter = 0;
2635 its->enabled = 0;
2636 vgic_its_free_device_list(kvm, its);
2637 vgic_its_free_collection_list(kvm, its);
2668 static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
2670 const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2677 mutex_lock(&its->its_lock);
2680 mutex_unlock(&its->its_lock);
2687 vgic_its_reset(kvm, its);
2690 ret = abi->save_tables(its);
2693 ret = abi->restore_tables(its);
2698 mutex_unlock(&its->its_lock);
2706 struct vgic_its *its = dev->private;
2721 ret = vgic_check_ioaddr(dev->kvm, &its->vgic_its_base,
2726 return vgic_register_its_iodev(dev->kvm, its, addr);
2729 return vgic_its_ctrl(dev->kvm, its, attr->attr);
2748 struct vgic_its *its = dev->private;
2749 u64 addr = its->vgic_its_base;
2778 .name = "kvm-arm-vgic-its",