/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Eddie Dong <eddie.dong@intel.com>
 *
 * Contributors:
 *    Niu Bing <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include <linux/types.h>
#include <linux/kthread.h>

#include "i915_drv.h"
#include "intel_gvt.h"
#include "gvt.h"
#include <linux/vfio.h>
#include <linux/mdev.h>

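/*
 * Host-side GVT state: the host device, whether the device model has been
 * initialized, and the registered hypervisor (MPT) module and its type.
 */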
struct intel_gvt_host intel_gvt_host;

static const char * const supported_hypervisors[] = {
	[INTEL_GVT_HYPERVISOR_XEN] = "XEN",
	[INTEL_GVT_HYPERVISOR_KVM] = "KVM",
};

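/*
 * mdev type directories are named "<parent driver>-<type>", so skip past the
 * driver-name prefix before matching against the vGPU types set up by
 * intel_gvt_init_vgpu_types().
 */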
static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
		const char *name)
{
	const char *driver_name =
		dev_driver_string(&gvt->gt->i915->drm.pdev->dev);
	int i;

	name += strlen(driver_name) + 1;
	for (i = 0; i < gvt->num_types; i++) {
		struct intel_vgpu_type *t = &gvt->types[i];

		if (!strncmp(t->name, name, sizeof(t->name)))
			return t;
	}

	return NULL;
}

static ssize_t available_instances_show(struct kobject *kobj,
					struct device *dev, char *buf)
{
	struct intel_vgpu_type *type;
	unsigned int num = 0;
	void *gvt = kdev_to_i915(dev)->gvt;

	type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
	if (!type)
		num = 0;
	else
		num = type->avail_instance;

	return sprintf(buf, "%u\n", num);
}

static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
		char *buf)
{
	return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
}

static ssize_t description_show(struct kobject *kobj, struct device *dev,
		char *buf)
{
	struct intel_vgpu_type *type;
	void *gvt = kdev_to_i915(dev)->gvt;

	type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
	if (!type)
		return 0;

	return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
		       "fence: %d\nresolution: %s\n"
		       "weight: %d\n",
		       BYTES_TO_MB(type->low_gm_size),
		       BYTES_TO_MB(type->high_gm_size),
		       type->fence, vgpu_edid_str(type->resolution),
		       type->weight);
}

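/* Read-only attributes exposed under each mdev type's sysfs directory. */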
static MDEV_TYPE_ATTR_RO(available_instances);
static MDEV_TYPE_ATTR_RO(device_api);
static MDEV_TYPE_ATTR_RO(description);

static struct attribute *gvt_type_attrs[] = {
	&mdev_type_attr_available_instances.attr,
	&mdev_type_attr_device_api.attr,
	&mdev_type_attr_description.attr,
	NULL,
};

static struct attribute_group *gvt_vgpu_type_groups[] = {
	[0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
};

static bool intel_get_gvt_attrs(struct attribute_group ***intel_vgpu_type_groups)
{
	*intel_vgpu_type_groups = gvt_vgpu_type_groups;
	return true;
}

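/*
 * Allocate one attribute_group per vGPU type, named after the type, so each
 * supported type gets its own directory under mdev_supported_types in sysfs.
 */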
static int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
{
	int i, j;
	struct intel_vgpu_type *type;
	struct attribute_group *group;

	for (i = 0; i < gvt->num_types; i++) {
		type = &gvt->types[i];

		group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
		if (WARN_ON(!group))
			goto unwind;

		group->name = type->name;
		group->attrs = gvt_type_attrs;
		gvt_vgpu_type_groups[i] = group;
	}

	return 0;

unwind:
	for (j = 0; j < i; j++) {
		group = gvt_vgpu_type_groups[j];
		kfree(group);
	}

	return -ENOMEM;
}

static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
{
	int i;
	struct attribute_group *group;

	for (i = 0; i < gvt->num_types; i++) {
		group = gvt_vgpu_type_groups[i];
		gvt_vgpu_type_groups[i] = NULL;
		kfree(group);
	}
}

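/* Callbacks handed to the hypervisor module at registration time. */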
static const struct intel_gvt_ops intel_gvt_ops = {
	.emulate_cfg_read = intel_vgpu_emulate_cfg_read,
	.emulate_cfg_write = intel_vgpu_emulate_cfg_write,
	.emulate_mmio_read = intel_vgpu_emulate_mmio_read,
	.emulate_mmio_write = intel_vgpu_emulate_mmio_write,
	.vgpu_create = intel_gvt_create_vgpu,
	.vgpu_destroy = intel_gvt_destroy_vgpu,
	.vgpu_release = intel_gvt_release_vgpu,
	.vgpu_reset = intel_gvt_reset_vgpu,
	.vgpu_activate = intel_gvt_activate_vgpu,
	.vgpu_deactivate = intel_gvt_deactivate_vgpu,
	.gvt_find_vgpu_type = intel_gvt_find_vgpu_type,
	.get_gvt_attrs = intel_get_gvt_attrs,
	.vgpu_query_plane = intel_vgpu_query_plane,
	.vgpu_get_dmabuf = intel_vgpu_get_dmabuf,
	.write_protect_handler = intel_vgpu_page_track_handler,
	.emulate_hotplug = intel_vgpu_emulate_hotplug,
};

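/*
 * Fixed properties of the emulated GPU device: config space size, MMIO BAR
 * layout, GTT entry format and the MSI capability offset taken from the
 * physical device.
 */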
static void init_device_info(struct intel_gvt *gvt)
{
	struct intel_gvt_device_info *info = &gvt->device_info;
	struct pci_dev *pdev = gvt->gt->i915->drm.pdev;

	info->max_support_vgpus = 8;
	info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
	info->mmio_size = 2 * 1024 * 1024;
	info->mmio_bar = 0;
	info->gtt_start_offset = 8 * 1024 * 1024;
	info->gtt_entry_size = 8;
	info->gtt_entry_size_shift = 3;
	info->gmadr_bytes_in_cmd = 8;
	info->max_surface_size = 36 * 1024 * 1024;
	info->msi_cap_offset = pdev->msi_cap;
}

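/*
 * The service thread sleeps on service_thread_wq and handles deferred work
 * flagged in gvt->service_request: vblank emulation and vGPU scheduling.
 */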
static int gvt_service_thread(void *data)
{
	struct intel_gvt *gvt = (struct intel_gvt *)data;
	int ret;

	gvt_dbg_core("service thread start\n");

	while (!kthread_should_stop()) {
		ret = wait_event_interruptible(gvt->service_thread_wq,
				kthread_should_stop() || gvt->service_request);

		if (kthread_should_stop())
			break;

		if (WARN_ONCE(ret, "service thread was woken up by a signal\n"))
			continue;

		if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK,
					(void *)&gvt->service_request))
			intel_gvt_emulate_vblank(gvt);

		if (test_bit(INTEL_GVT_REQUEST_SCHED,
				(void *)&gvt->service_request) ||
			test_bit(INTEL_GVT_REQUEST_EVENT_SCHED,
					(void *)&gvt->service_request)) {
			intel_gvt_schedule(gvt);
		}
	}

	return 0;
}

static void clean_service_thread(struct intel_gvt *gvt)
{
	kthread_stop(gvt->service_thread);
}

static int init_service_thread(struct intel_gvt *gvt)
{
	init_waitqueue_head(&gvt->service_thread_wq);

	gvt->service_thread = kthread_run(gvt_service_thread,
			gvt, "gvt_service_thread");
	if (IS_ERR(gvt->service_thread)) {
		gvt_err("failed to start service thread\n");
		return PTR_ERR(gvt->service_thread);
	}
	return 0;
}

/**
 * intel_gvt_clean_device - clean a GVT device
 * @i915: i915 private
 *
 * This function is called at the driver unloading stage, to free the
 * resources owned by a GVT device.
 *
 */
void intel_gvt_clean_device(struct drm_i915_private *i915)
{
	struct intel_gvt *gvt = fetch_and_zero(&i915->gvt);

	if (drm_WARN_ON(&i915->drm, !gvt))
		return;

	intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
	intel_gvt_cleanup_vgpu_type_groups(gvt);
	intel_gvt_clean_vgpu_types(gvt);

	intel_gvt_debugfs_clean(gvt);
	clean_service_thread(gvt);
	intel_gvt_clean_cmd_parser(gvt);
	intel_gvt_clean_sched_policy(gvt);
	intel_gvt_clean_workload_scheduler(gvt);
	intel_gvt_clean_gtt(gvt);
	intel_gvt_clean_irq(gvt);
	intel_gvt_free_firmware(gvt);
	intel_gvt_clean_mmio_info(gvt);
	idr_destroy(&gvt->vgpu_idr);

	/* i915->gvt was already cleared by fetch_and_zero() above. */
	kfree(gvt);
}

/**
 * intel_gvt_init_device - initialize a GVT device
 * @i915: drm i915 private data
 *
 * This function is called at the initialization stage, to initialize
 * necessary GVT components.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 *
 */
int intel_gvt_init_device(struct drm_i915_private *i915)
{
	struct intel_gvt *gvt;
	struct intel_vgpu *vgpu;
	int ret;

	if (drm_WARN_ON(&i915->drm, i915->gvt))
		return -EEXIST;

	gvt = kzalloc(sizeof(struct intel_gvt), GFP_KERNEL);
	if (!gvt)
		return -ENOMEM;

	gvt_dbg_core("init gvt device\n");

	idr_init(&gvt->vgpu_idr);
	spin_lock_init(&gvt->scheduler.mmio_context_lock);
	mutex_init(&gvt->lock);
	mutex_init(&gvt->sched_lock);
	gvt->gt = &i915->gt;
	i915->gvt = gvt;

	init_device_info(gvt);

	ret = intel_gvt_setup_mmio_info(gvt);
	if (ret)
		goto out_clean_idr;

	intel_gvt_init_engine_mmio_context(gvt);

	ret = intel_gvt_load_firmware(gvt);
	if (ret)
		goto out_clean_mmio_info;

	ret = intel_gvt_init_irq(gvt);
	if (ret)
		goto out_free_firmware;

	ret = intel_gvt_init_gtt(gvt);
	if (ret)
		goto out_clean_irq;

	ret = intel_gvt_init_workload_scheduler(gvt);
	if (ret)
		goto out_clean_gtt;

	ret = intel_gvt_init_sched_policy(gvt);
	if (ret)
		goto out_clean_workload_scheduler;

	ret = intel_gvt_init_cmd_parser(gvt);
	if (ret)
		goto out_clean_sched_policy;

	ret = init_service_thread(gvt);
	if (ret)
		goto out_clean_cmd_parser;

	ret = intel_gvt_init_vgpu_types(gvt);
	if (ret)
		goto out_clean_thread;

	ret = intel_gvt_init_vgpu_type_groups(gvt);
	if (ret) {
		gvt_err("failed to init vgpu type groups: %d\n", ret);
		goto out_clean_types;
	}

	vgpu = intel_gvt_create_idle_vgpu(gvt);
	if (IS_ERR(vgpu)) {
		ret = PTR_ERR(vgpu);
		gvt_err("failed to create idle vgpu\n");
		goto out_clean_type_groups;
	}
	gvt->idle_vgpu = vgpu;

	intel_gvt_debugfs_init(gvt);

	gvt_dbg_core("gvt device initialization is done\n");
	intel_gvt_host.dev = &i915->drm.pdev->dev;
	intel_gvt_host.initialized = true;
	return 0;

out_clean_type_groups:
	intel_gvt_cleanup_vgpu_type_groups(gvt);
out_clean_types:
	intel_gvt_clean_vgpu_types(gvt);
out_clean_thread:
	clean_service_thread(gvt);
out_clean_cmd_parser:
	intel_gvt_clean_cmd_parser(gvt);
out_clean_sched_policy:
	intel_gvt_clean_sched_policy(gvt);
out_clean_workload_scheduler:
	intel_gvt_clean_workload_scheduler(gvt);
out_clean_gtt:
	intel_gvt_clean_gtt(gvt);
out_clean_irq:
	intel_gvt_clean_irq(gvt);
out_free_firmware:
	intel_gvt_free_firmware(gvt);
out_clean_mmio_info:
	intel_gvt_clean_mmio_info(gvt);
out_clean_idr:
	idr_destroy(&gvt->vgpu_idr);
	kfree(gvt);
	i915->gvt = NULL;
	return ret;
}

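/**
 * intel_gvt_pm_resume - restore GVT state on system resume
 * @gvt: intel gvt device
 *
 * Restore the fence registers, saved vGPU MMIO state and GGTT entries after
 * the device resumes from system suspend.
 */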
int
intel_gvt_pm_resume(struct intel_gvt *gvt)
{
	intel_gvt_restore_fence(gvt);
	intel_gvt_restore_mmio(gvt);
	intel_gvt_restore_ggtt(gvt);
	return 0;
}

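/*
 * Called by a hypervisor module (e.g. kvmgt) to register its MPT operations.
 * A reference on this module is held for as long as a hypervisor is
 * registered.
 */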
int
intel_gvt_register_hypervisor(struct intel_gvt_mpt *m)
{
	int ret;
	void *gvt;

	if (!intel_gvt_host.initialized)
		return -ENODEV;

	if (m->type != INTEL_GVT_HYPERVISOR_KVM &&
	    m->type != INTEL_GVT_HYPERVISOR_XEN)
		return -EINVAL;

	/* Get a reference for device model module */
	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	intel_gvt_host.mpt = m;
	intel_gvt_host.hypervisor_type = m->type;
	gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt;

	ret = intel_gvt_hypervisor_host_init(intel_gvt_host.dev, gvt,
					     &intel_gvt_ops);
	if (ret < 0) {
		gvt_err("Failed to init %s hypervisor module\n",
			supported_hypervisors[intel_gvt_host.hypervisor_type]);
		module_put(THIS_MODULE);
		return -ENODEV;
	}
	gvt_dbg_core("Running with hypervisor %s in host mode\n",
		     supported_hypervisors[intel_gvt_host.hypervisor_type]);
	return 0;
}
EXPORT_SYMBOL_GPL(intel_gvt_register_hypervisor);

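/*
 * Counterpart of intel_gvt_register_hypervisor(): tear down the hypervisor
 * host state and drop the module reference taken at registration.
 */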
void
intel_gvt_unregister_hypervisor(void)
{
	intel_gvt_hypervisor_host_exit(intel_gvt_host.dev);
	module_put(THIS_MODULE);
}
EXPORT_SYMBOL_GPL(intel_gvt_unregister_hypervisor);