Lines Matching defs:xive (matches of xive shown with their source line numbers, apparently from arch/powerpc/kvm/book3s_xive.c)

6 #define pr_fmt(fmt) "xive-kvm: " fmt
21 #include <asm/xive.h>
22 #include <asm/xive-regs.h>
248 struct kvmppc_xive *xive = xc->xive;
257 qpage = (__be32 *)__get_free_pages(GFP_KERNEL, xive->q_page_order);
263 memset(qpage, 0, 1 << xive->q_order);
273 xive->q_order, true);
280 /* Called with xive->lock held */
283 struct kvmppc_xive *xive = kvm->arch.xive;
287 lockdep_assert_held(&xive->lock);
290 if (xive->qmap & (1 << prio))
300 if (rc == 0 && !xive->single_escalation)
302 xive->single_escalation);
309 xive->qmap |= (1 << prio);
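
The matches at source lines 280-309 show the per-priority provisioning path: a queue is only set up once per priority, tracked in the qmap bitmap under xive->lock. A minimal sketch of that idiom, reconstructed from the lines above (the function body is elided in this listing, so everything other than the qmap handling is assumption):

/* Illustrative sketch only; the xive field names come from the matches
 * above, the rest is assumed.  Caller holds xive->lock. */
static int sketch_check_provisioning(struct kvmppc_xive *xive, u8 prio)
{
	lockdep_assert_held(&xive->lock);

	/* Each bit of qmap records that a priority already has queues. */
	if (xive->qmap & (1 << prio))
		return 0;

	/* ... provision one queue per connected vcpu here ... */

	/* Mark the priority as provisioned so later calls return early. */
	xive->qmap |= (1 << prio);
	return 0;
}
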
391 static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
436 kvmppc_xive_vp(xive, state->act_server),
471 static void xive_finish_unmask(struct kvmppc_xive *xive,
492 kvmppc_xive_vp(xive, state->act_server),
532 struct kvmppc_xive *xive = kvm->arch.xive;
569 kvmppc_xive_vp(xive, server),
616 struct kvmppc_xive *xive = kvm->arch.xive;
623 if (!xive)
631 mutex_lock(&xive->lock);
632 rc = xive_check_provisioning(xive->kvm,
634 mutex_unlock(&xive->lock);
641 sb = kvmppc_xive_find_source(xive, irq, &idx);
661 xive_lock_and_mask(xive, sb, state);
702 xive_finish_unmask(xive, sb, state, priority);
717 struct kvmppc_xive *xive = kvm->arch.xive;
722 if (!xive)
725 sb = kvmppc_xive_find_source(xive, irq, &idx);
739 struct kvmppc_xive *xive = kvm->arch.xive;
744 if (!xive)
747 sb = kvmppc_xive_find_source(xive, irq, &idx);
770 xive_finish_unmask(xive, sb, state, state->saved_priority);
778 struct kvmppc_xive *xive = kvm->arch.xive;
783 if (!xive)
786 sb = kvmppc_xive_find_source(xive, irq, &idx);
796 state->saved_priority = xive_lock_and_mask(xive, sb, state);
802 static bool xive_restore_pending_irq(struct kvmppc_xive *xive, u32 irq)
808 sb = kvmppc_xive_find_source(xive, irq, &idx);
840 struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
844 if (!xc || !xive)
884 * case, we keep that info and increment a counter in the xive to
887 if (xisr > XICS_IPI && !xive_restore_pending_irq(xive, xisr)) {
889 xive->delayed_irqs++;
899 struct kvmppc_xive *xive = kvm->arch.xive;
909 if (!xive)
914 sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
943 prio = xive_lock_and_mask(xive, sb, state);
954 if (xive->ops && xive->ops->reset_mapped)
955 xive->ops->reset_mapped(kvm, guest_irq);
968 kvmppc_xive_vp(xive, state->act_server),
996 struct kvmppc_xive *xive = kvm->arch.xive;
1004 if (!xive)
1009 sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
1019 prio = xive_lock_and_mask(xive, sb, state);
1046 if (xive->ops && xive->ops->reset_mapped) {
1047 xive->ops->reset_mapped(kvm, guest_irq);
1052 kvmppc_xive_vp(xive, state->act_server),
1078 struct kvmppc_xive *xive = kvm->arch.xive;
1081 for (i = 0; i <= xive->max_sbid; i++) {
1082 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
1153 struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
1174 if (xc->xive->single_escalation)
1196 xive->q_page_order);
1214 static bool kvmppc_xive_vcpu_id_valid(struct kvmppc_xive *xive, u32 cpu)
1216 /* We have a block of xive->nr_servers VPs. We just need to check
1219 return kvmppc_pack_vcpu_id(xive->kvm, cpu) < xive->nr_servers;
1222 int kvmppc_xive_compute_vp_id(struct kvmppc_xive *xive, u32 cpu, u32 *vp)
1226 if (!kvmppc_xive_vcpu_id_valid(xive, cpu)) {
1231 if (xive->vp_base == XIVE_INVALID_VP) {
1232 xive->vp_base = xive_native_alloc_vp_block(xive->nr_servers);
1233 pr_devel("VP_Base=%x nr_servers=%d\n", xive->vp_base, xive->nr_servers);
1235 if (xive->vp_base == XIVE_INVALID_VP)
1239 vp_id = kvmppc_xive_vp(xive, cpu);
1240 if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) {
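
Lines 1214-1240 derive a VP id for a vcpu from the lazily allocated VP block. kvmppc_xive_vp() itself does not appear in this listing; the sketch below assumes it simply offsets the packed vcpu id from vp_base, which is consistent with how vp_base, nr_servers and kvmppc_pack_vcpu_id() are used above:

/* Assumed helper; the real kvmppc_xive_vp() lives in book3s_xive.h and
 * is not part of this listing. */
static inline u32 sketch_xive_vp(struct kvmppc_xive *xive, u32 server)
{
	/* One VP per packed vcpu id, carved out of the block at vp_base. */
	return xive->vp_base + kvmppc_pack_vcpu_id(xive->kvm, server);
}

static bool sketch_vcpu_id_valid(struct kvmppc_xive *xive, u32 cpu)
{
	/* Only xive->nr_servers VPs are (or will be) allocated. */
	return kvmppc_pack_vcpu_id(xive->kvm, cpu) < xive->nr_servers;
}
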
1253 struct kvmppc_xive *xive = dev->private;
1264 if (xive->kvm != vcpu->kvm)
1270 mutex_lock(&xive->lock);
1272 r = kvmppc_xive_compute_vp_id(xive, cpu, &vp_id);
1283 xc->xive = xive;
1301 pr_err("Failed to allocate xive irq for VCPU IPI\n");
1315 r = xive_native_enable_vp(xc->vp_id, xive->single_escalation);
1332 if (i == 7 && xive->single_escalation)
1336 if (xive->qmap & (1 << i)) {
1338 if (r == 0 && !xive->single_escalation)
1340 vcpu, i, xive->single_escalation);
1355 r = kvmppc_xive_attach_escalation(vcpu, 0, xive->single_escalation);
1365 mutex_unlock(&xive->lock);
1378 static void xive_pre_save_set_queued(struct kvmppc_xive *xive, u32 irq)
1384 sb = kvmppc_xive_find_source(xive, irq, &idx);
1408 static void xive_pre_save_mask_irq(struct kvmppc_xive *xive,
1418 state->saved_scan_prio = xive_lock_and_mask(xive, sb, state);
1428 static void xive_pre_save_unmask_irq(struct kvmppc_xive *xive,
1446 xive_finish_unmask(xive, sb, state, state->saved_scan_prio);
1452 static void xive_pre_save_queue(struct kvmppc_xive *xive, struct xive_q *q)
1461 xive_pre_save_set_queued(xive, irq);
1465 static void xive_pre_save_scan(struct kvmppc_xive *xive)
1474 for (i = 0; i <= xive->max_sbid; i++) {
1475 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
1479 xive_pre_save_mask_irq(xive, sb, j);
1483 kvm_for_each_vcpu(i, vcpu, xive->kvm) {
1489 xive_pre_save_queue(xive, &xc->queues[j]);
1494 for (i = 0; i <= xive->max_sbid; i++) {
1495 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
1499 xive_pre_save_unmask_irq(xive, sb, j);
1503 static void xive_post_save_scan(struct kvmppc_xive *xive)
1508 for (i = 0; i <= xive->max_sbid; i++) {
1509 struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
1517 xive->saved_src_count = 0;
1523 static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
1531 sb = kvmppc_xive_find_source(xive, irq, &idx);
1558 if (xive->saved_src_count == 0)
1559 xive_pre_save_scan(xive);
1560 xive->saved_src_count++;
1596 if (xive->saved_src_count == xive->src_count)
1597 xive_post_save_scan(xive);
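
Lines 1503-1597 bracket the migration save pass with two counters: src_count grows as sources are configured (line 1823) and saved_src_count tracks how many have been read back. A reduced sketch of that bracket, assuming the omitted body just copies the source state out:

/* Reduced sketch of the save-cycle bookkeeping; the real
 * xive_get_source() also serialises the interrupt state itself. */
static int sketch_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
{
	if (xive->saved_src_count == 0)
		xive_pre_save_scan(xive);	/* first read: mask, scan queues, unmask */
	xive->saved_src_count++;

	/* ... copy this source's state to the user buffer at addr ... */

	if (xive->saved_src_count == xive->src_count)
		xive_post_save_scan(xive);	/* last read: resets saved_src_count */
	return 0;
}
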
1607 struct kvmppc_xive *xive, int irq)
1614 mutex_lock(&xive->lock);
1617 if (xive->src_blocks[bid])
1635 xive->src_blocks[bid] = sb;
1637 if (bid > xive->max_sbid)
1638 xive->max_sbid = bid;
1641 mutex_unlock(&xive->lock);
1642 return xive->src_blocks[bid];
1645 static bool xive_check_delayed_irq(struct kvmppc_xive *xive, u32 irq)
1647 struct kvm *kvm = xive->kvm;
1659 xive->delayed_irqs--;
1666 static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
1683 sb = kvmppc_xive_find_source(xive, irq, &idx);
1686 sb = kvmppc_xive_create_src_block(xive, irq);
1728 xive_lock_and_mask(xive, sb, state);
1751 mutex_lock(&xive->lock);
1752 rc = xive_check_provisioning(xive->kvm, act_prio);
1753 mutex_unlock(&xive->lock);
1757 rc = xive_target_interrupt(xive->kvm, state,
1770 if (xive->delayed_irqs && xive_check_delayed_irq(xive, irq)) {
1817 xive_finish_unmask(xive, sb, state, guest_prio);
1823 xive->src_count++;
1832 struct kvmppc_xive *xive = kvm->arch.xive;
1837 if (!xive)
1840 sb = kvmppc_xive_find_source(xive, irq, &idx);
1866 int kvmppc_xive_set_nr_servers(struct kvmppc_xive *xive, u64 addr)
1880 mutex_lock(&xive->lock);
1881 if (xive->vp_base != XIVE_INVALID_VP)
1894 xive->nr_servers = KVM_MAX_VCPUS;
1896 xive->nr_servers = nr_servers;
1898 mutex_unlock(&xive->lock);
1905 struct kvmppc_xive *xive = dev->private;
1910 return xive_set_source(xive, attr->attr, attr->addr);
1914 return kvmppc_xive_set_nr_servers(xive, attr->addr);
1922 struct kvmppc_xive *xive = dev->private;
1927 return xive_get_source(xive, attr->attr, attr->addr);
1983 struct kvmppc_xive *xive = dev->private;
1984 struct kvm *kvm = xive->kvm;
1988 pr_devel("Releasing xive device\n");
1999 debugfs_remove(xive->dentry);
2021 * against xive code getting called during vcpu execution or
2024 kvm->arch.xive = NULL;
2027 for (i = 0; i <= xive->max_sbid; i++) {
2028 if (xive->src_blocks[i])
2029 kvmppc_xive_free_sources(xive->src_blocks[i]);
2030 kfree(xive->src_blocks[i]);
2031 xive->src_blocks[i] = NULL;
2034 if (xive->vp_base != XIVE_INVALID_VP)
2035 xive_native_free_vp_block(xive->vp_base);
2061 struct kvmppc_xive *xive = *kvm_xive_device;
2063 if (!xive) {
2064 xive = kzalloc(sizeof(*xive), GFP_KERNEL);
2065 *kvm_xive_device = xive;
2067 memset(xive, 0, sizeof(*xive));
2070 return xive;
2078 struct kvmppc_xive *xive;
2081 pr_devel("Creating xive for partition\n");
2084 if (kvm->arch.xive)
2087 xive = kvmppc_xive_get_device(kvm, type);
2088 if (!xive)
2091 dev->private = xive;
2092 xive->dev = dev;
2093 xive->kvm = kvm;
2094 mutex_init(&xive->lock);
2097 xive->q_order = xive_native_default_eq_shift();
2098 if (xive->q_order < PAGE_SHIFT)
2099 xive->q_page_order = 0;
2101 xive->q_page_order = xive->q_order - PAGE_SHIFT;
2104 xive->vp_base = XIVE_INVALID_VP;
2108 xive->nr_servers = KVM_MAX_VCPUS;
2110 xive->single_escalation = xive_native_has_single_escalation();
2112 kvm->arch.xive = xive;
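
The device-create path at lines 2097-2101 fixes the event queue geometry that the allocation at lines 257-263 later relies on: q_order is the queue size as a power of two in bytes, q_page_order the corresponding page order for __get_free_pages(). A self-contained restatement of that relation, with the surrounding create logic omitted:

/* Restates the sizing relation from lines 2097-2101 and its use at
 * lines 257-263 and 1196; illustrative only. */
static void sketch_queue_sizing(struct kvmppc_xive *xive)
{
	__be32 *qpage;

	xive->q_order = xive_native_default_eq_shift();
	if (xive->q_order < PAGE_SHIFT)
		xive->q_page_order = 0;		/* queue fits in a single page */
	else
		xive->q_page_order = xive->q_order - PAGE_SHIFT;

	/* Provisioning allocates and clears 1 << q_order bytes ... */
	qpage = (__be32 *)__get_free_pages(GFP_KERNEL, xive->q_page_order);
	if (qpage) {
		memset(qpage, 0, 1 << xive->q_order);
		/* ... and teardown frees with the same page order (cf. line 1196). */
		free_pages((unsigned long)qpage, xive->q_page_order);
	}
}
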
2156 struct kvmppc_xive *xive = m->private;
2157 struct kvm *kvm = xive->kvm;
2214 static void xive_debugfs_init(struct kvmppc_xive *xive)
2218 name = kasprintf(GFP_KERNEL, "kvm-xive-%p", xive);
2224 xive->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
2225 xive, &xive_debug_fops);
2233 struct kvmppc_xive *xive = (struct kvmppc_xive *)dev->private;
2236 xive_debugfs_init(xive);
2240 .name = "kvm-xive",