/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Interrupt delivery
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/memblock.h>
#include <asm/page.h>
#include <asm/cacheflush.h>

#include <linux/kvm_host.h>

#include "interrupt.h"

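/*
 * Mark the exception of the given priority as pending in the VCPU's
 * pending_exceptions bitmap; it is delivered later by
 * kvm_mips_deliver_interrupts().
 */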
void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
	set_bit(priority, &vcpu->arch.pending_exceptions);
}

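/*
 * Clear a previously queued exception of the given priority from the
 * VCPU's pending_exceptions bitmap.
 */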
void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
	clear_bit(priority, &vcpu->arch.pending_exceptions);
}

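/* Raise the guest timer interrupt and queue it for delivery. */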
void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu)
{
	/*
	 * Set the Cause bits to reflect the pending timer interrupt;
	 * the EXC code will be set when the interrupt is actually
	 * delivered.
	 */
	kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));

	/* Queue up an INT exception for the core */
	kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
}

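/* Lower the guest timer interrupt and drop it from the pending bitmap. */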
void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
{
	kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
	kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
}

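/*
 * Raise an external (IO) guest interrupt: irq->irq carries the interrupt
 * number, whose Cause.IP bit is set before the corresponding priority is
 * queued for delivery.
 */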
void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
			      struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;

	/*
	 * Set the Cause bits to reflect the pending IO interrupt;
	 * the EXC code will be set when the interrupt is actually
	 * delivered.
	 */
	kvm_set_c0_guest_cause(vcpu->arch.cop0, 1 << (intr + 8));
	kvm_mips_queue_irq(vcpu, kvm_irq_to_priority(intr));
}

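/*
 * Lower an external (IO) guest interrupt: irq->irq carries the negated
 * interrupt number, whose Cause.IP bit and pending priority are cleared.
 */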
void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
				struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;

	kvm_clear_c0_guest_cause(vcpu->arch.cop0, 1 << (-intr + 8));
	kvm_mips_dequeue_irq(vcpu, kvm_irq_to_priority(-intr));
}

/*
 * Deliver the interrupt of the corresponding priority, if possible.
 * Returns 1 if the interrupt was delivered, 0 otherwise.
 */
int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
			    u32 cause)
{
	int allowed = 0;
	u32 exccode, ie;

	struct kvm_vcpu_arch *arch = &vcpu->arch;
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	if (priority == MIPS_EXC_MAX)
		return 0;

	/*
	 * The interrupt may only be taken if interrupts are enabled (IE),
	 * the VCPU is not in exception or error mode (EXL/ERL), and the
	 * corresponding IM bit is set in the guest Status register.
	 */
	ie = 1 << (kvm_priority_to_irq[priority] + 8);
	if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
	    && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
	    && (kvm_read_c0_guest_status(cop0) & ie)) {
		allowed = 1;
		exccode = EXCCODE_INT;
	}

	/* Are we allowed to deliver the interrupt? */
	if (allowed) {
		if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
			/* save old pc */
			kvm_write_c0_guest_epc(cop0, arch->pc);
			kvm_set_c0_guest_status(cop0, ST0_EXL);

			if (cause & CAUSEF_BD)
				kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
			else
				kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

			kvm_debug("Delivering INT @ pc %#lx\n", arch->pc);
		} else {
			kvm_err("Trying to deliver interrupt when EXL is already set\n");
		}

		kvm_change_c0_guest_cause(cop0, CAUSEF_EXCCODE,
					  (exccode << CAUSEB_EXCCODE));

		/* Set PC to the interrupt exception entry point */
		arch->pc = kvm_mips_guest_exception_base(vcpu);
		if (kvm_read_c0_guest_cause(cop0) & CAUSEF_IV)
			arch->pc += 0x200;
		else
			arch->pc += 0x180;

		clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

	return allowed;
}

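/*
 * Clear callback for a pending interrupt of the given priority; nothing to
 * undo here, so simply report the clear as handled.
 */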
int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
			  u32 cause)
{
	return 1;
}

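/*
 * Walk the pending_exceptions_clr and pending_exceptions bitmaps, invoking
 * the irq_clear and irq_deliver callbacks for each set bit up to
 * MIPS_EXC_MAX.
 */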
void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, u32 cause)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned long *pending_clr = &vcpu->arch.pending_exceptions_clr;
	unsigned int priority;

	if (!(*pending) && !(*pending_clr))
		return;

	priority = __ffs(*pending_clr);
	while (priority <= MIPS_EXC_MAX) {
		if (kvm_mips_callbacks->irq_clear(vcpu, priority, cause)) {
			if (!KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE)
				break;
		}

		priority = find_next_bit(pending_clr,
					 BITS_PER_BYTE * sizeof(*pending_clr),
					 priority + 1);
	}

	priority = __ffs(*pending);
	while (priority <= MIPS_EXC_MAX) {
		if (kvm_mips_callbacks->irq_deliver(vcpu, priority, cause)) {
			if (!KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE)
				break;
		}

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}
}

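/* Report whether a guest timer interrupt is currently queued for the VCPU. */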
int kvm_mips_pending_timer(struct kvm_vcpu *vcpu)
{
	return test_bit(MIPS_EXC_INT_TIMER, &vcpu->arch.pending_exceptions);
}