1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
4 */
5
6#ifndef __LOONGARCH_KVM_CSR_H__
7#define __LOONGARCH_KVM_CSR_H__
8#include <asm/kvm_host.h>
9#include <asm/watch.h>
10#include <linux/uaccess.h>
11#include <linux/kvm_host.h>
12#include "kvmcpu.h"
13
/*
 * Accessors for hardware guest CSRs (GCSRs).
 * kvm_write_hw_gcsr() ignores its @csr argument: the value goes straight
 * to the hardware guest CSR selected by @id via gcsr_write().
 */
#define kvm_read_hw_gcsr(id)			gcsr_read(id)
#define kvm_write_hw_gcsr(csr, id, val)		gcsr_write(val, id)
16
17int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *v, int force);
18int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *v, int force);
19unsigned long _kvm_emu_read_csr(struct kvm_vcpu *vcpu, int csrid);
20void _kvm_emu_write_csr(struct kvm_vcpu *vcpu, int csrid, unsigned long val);
21void _kvm_emu_xchg_csr(struct kvm_vcpu *vcpu, int csrid,
22	unsigned long csr_mask, unsigned long val);
23int _kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu);
24
25static inline void kvm_save_hw_gcsr(struct loongarch_csrs *csr, int gid)
26{
27	csr->csrs[gid] = gcsr_read(gid);
28}
29
30static inline void kvm_restore_hw_gcsr(struct loongarch_csrs *csr, int gid)
31{
32	gcsr_write(csr->csrs[gid], gid);
33}
34
35static inline unsigned long kvm_read_sw_gcsr(struct loongarch_csrs *csr, int gid)
36{
37	return csr->csrs[gid];
38}
39
/* Overwrite the software-maintained value of guest CSR @gid with @val. */
static inline void kvm_write_sw_gcsr(struct loongarch_csrs *csr, int gid, unsigned long val)
{
	csr->csrs[gid] = val;
}
44
/* OR the bits of @val into the software copy of guest CSR @gid. */
static inline void kvm_set_sw_gcsr(struct loongarch_csrs *csr, int gid, unsigned long val)
{
	csr->csrs[gid] |= val;
}
49
50static inline void kvm_change_sw_gcsr(struct loongarch_csrs *csr, int gid, unsigned mask,
51	unsigned long val)
52{
53	unsigned long _mask = mask;
54	csr->csrs[gid] &= ~_mask;
55	csr->csrs[gid] |= val & _mask;
56}
57
58
/*
 * If @csrid matches @id, read the hardware guest CSR into *@v
 * (sign-extended through long) and return 0 from the enclosing function.
 * Arguments are fully parenthesized so expression arguments expand
 * without precedence surprises.
 */
#define GET_HW_GCSR(id, csrid, v)				\
	do {							\
		if ((csrid) == (id)) {				\
			*(v) = (long)kvm_read_hw_gcsr(csrid);	\
			return 0;				\
		}						\
	} while (0)
66
/*
 * If @csrid matches @id, read the software copy of the guest CSR into
 * *@v and return 0 from the enclosing function.  Arguments are
 * parenthesized against precedence surprises.
 */
#define GET_SW_GCSR(csr, id, csrid, v)				\
	do {							\
		if ((csrid) == (id)) {				\
			*(v) = kvm_read_sw_gcsr(csr, id);	\
			return 0;				\
		}						\
	} while (0)
74
/*
 * If @csrid matches @id, write *@v to the hardware guest CSR and return
 * 0 from the enclosing function.  Arguments are parenthesized against
 * precedence surprises.
 */
#define SET_HW_GCSR(csr, id, csrid, v)				\
	do {							\
		if ((csrid) == (id)) {				\
			kvm_write_hw_gcsr(csr, csrid, *(v));	\
			return 0;				\
		}						\
	} while (0)
82
/*
 * If @csrid matches @id, write *@v to the software copy of the guest CSR
 * and return 0 from the enclosing function.  Arguments are parenthesized
 * against precedence surprises.
 */
#define SET_SW_GCSR(csr, id, csrid, v)				\
	do {							\
		if ((csrid) == (id)) {				\
			kvm_write_sw_gcsr(csr, csrid, *(v));	\
			return 0;				\
		}						\
	} while (0)
90
91int _kvm_init_iocsr(struct kvm *kvm);
92int _kvm_set_iocsr(struct kvm *kvm, struct kvm_iocsr_entry *__user argp);
93int _kvm_get_iocsr(struct kvm *kvm, struct kvm_iocsr_entry *__user argp);
94
/*
 * Union of every privilege-level enable bit in a PERFCTRL CSR: a PMU
 * counter whose control value intersects this mask is actively counting
 * at some PLV.
 */
#define KVM_PMU_PLV_ENABLE      (CSR_PERFCTRL_PLV0 |            \
					CSR_PERFCTRL_PLV1 |     \
					CSR_PERFCTRL_PLV2 |     \
					CSR_PERFCTRL_PLV3)
99
/*
 * Emulate a guest write of @v to PMU CSR @id when @csrid matches.
 *
 * If @v enables counting at any PLV: turn on guest PMU pass-through
 * (CSR_GCFG_GPERF), force CSR_PERFCTRL_GMOD so the counter attributes to
 * guest mode, write the hardware GCSR, and flag KVM_LARCH_PERF so PMU
 * state is context-switched.  Otherwise only the software copy is
 * updated.  Returns from the enclosing (void) function once handled.
 *
 * Arguments are parenthesized so expression arguments expand safely
 * (the original "v & KVM_PMU_PLV_ENABLE" misparsed for compound @v).
 * NOTE: @v may still be evaluated more than once — pass side-effect-free
 * expressions.  Also fixes the malformed "return ;".
 */
#define CASE_WRITE_HW_PMU(vcpu, csr, id, csrid, v)                                      \
	do {                                                                            \
		if ((csrid) == (id)) {                                                  \
			if ((v) & KVM_PMU_PLV_ENABLE) {                                 \
				write_csr_gcfg(read_csr_gcfg() | CSR_GCFG_GPERF);       \
				kvm_write_hw_gcsr(csr, csrid, (v) | CSR_PERFCTRL_GMOD); \
				(vcpu)->arch.aux_inuse |= KVM_LARCH_PERF;               \
				return;                                                 \
			} else {                                                        \
				kvm_write_sw_gcsr(csr, csrid, v);                       \
				return;                                                 \
			}                                                               \
		}                                                                       \
	} while (0)
114
115#endif	/* __LOONGARCH_KVM_CSR_H__ */
116