/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <drm/gpu_scheduler.h>

/* Slab cache backing all scheduler fence allocations. */
static struct kmem_cache *sched_fence_slab;

static int __init drm_sched_fence_slab_init(void)
{
	sched_fence_slab = kmem_cache_create(
		"drm_sched_fence", sizeof(struct drm_sched_fence), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!sched_fence_slab)
		return -ENOMEM;

	return 0;
}
static void __exit drm_sched_fence_slab_fini(void)
{
	rcu_barrier();
	kmem_cache_destroy(sched_fence_slab);
}
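
/*
 * Fence memory is released with call_rcu() (see drm_sched_fence_free()
 * below) because dma_fence pointers may still be dereferenced under
 * rcu_read_lock(), for example via dma_fence_get_rcu_safe(). The
 * rcu_barrier() in the fini path above therefore waits for all pending
 * free callbacks before the slab cache is destroyed on module unload.
 */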

/**
 * drm_sched_fence_scheduled - signal the fence marking the job as scheduled
 *
 * @fence: scheduler fence to signal
 *
 * Signals the scheduled fence once the scheduler has picked the job from
 * its entity queue and pushed it to the hardware.
 */
void drm_sched_fence_scheduled(struct drm_sched_fence *fence)
{
	int ret = dma_fence_signal(&fence->scheduled);

	if (!ret)
		DMA_FENCE_TRACE(&fence->scheduled,
				"signaled from irq context\n");
	else
		DMA_FENCE_TRACE(&fence->scheduled,
				"was already signaled\n");
}

/**
 * drm_sched_fence_finished - signal the fence marking the job as finished
 *
 * @fence: scheduler fence to signal
 *
 * Signals the finished fence once the hardware has completed the job.
 */
void drm_sched_fence_finished(struct drm_sched_fence *fence)
{
	int ret = dma_fence_signal(&fence->finished);

	if (!ret)
		DMA_FENCE_TRACE(&fence->finished,
				"signaled from irq context\n");
	else
		DMA_FENCE_TRACE(&fence->finished,
				"was already signaled\n");
}
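
/*
 * Every scheduler job carries both of the above fences:
 * &drm_sched_fence.scheduled signals once the scheduler has picked the
 * job from its entity queue, while &drm_sched_fence.finished signals once
 * the hardware has completed it. A minimal waiter sketch, assuming
 * s_fence points to a valid struct drm_sched_fence and uninterruptible
 * waits are acceptable:
 *
 *	dma_fence_wait(&s_fence->scheduled, false);
 *	dma_fence_wait(&s_fence->finished, false);
 */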

static const char *drm_sched_fence_get_driver_name(struct dma_fence *fence)
{
	return "drm_sched";
}

static const char *drm_sched_fence_get_timeline_name(struct dma_fence *f)
{
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	return fence->sched->name;
}

/**
 * drm_sched_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */
static void drm_sched_fence_free(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	kmem_cache_free(sched_fence_slab, fence);
}

/**
 * drm_sched_fence_release_scheduled - release callback for the scheduled fence
 *
 * @f: fence
 *
 * This function is called when the reference count of the scheduled fence
 * becomes zero. It drops the reference to the parent hardware fence and
 * schedules freeing of the whole fence object after an RCU grace period.
 */
static void drm_sched_fence_release_scheduled(struct dma_fence *f)
{
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	dma_fence_put(fence->parent);
	call_rcu(&fence->finished.rcu, drm_sched_fence_free);
}

/**
 * drm_sched_fence_release_finished - drop extra reference
 *
 * @f: fence
 *
 * Drop the extra reference that the finished fence holds on the scheduled
 * fence, so that the memory shared by both fences can be freed.
 */
static void drm_sched_fence_release_finished(struct dma_fence *f)
{
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	dma_fence_put(&fence->scheduled);
}

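/*
 * Lifetime rule derived from the two release callbacks above: the two
 * embedded fences are reference counted independently but share a single
 * slab allocation. Releasing the finished fence only drops its reference
 * on the scheduled fence; the allocation itself is freed, after an RCU
 * grace period, once the scheduled fence's reference count reaches zero.
 */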
static const struct dma_fence_ops drm_sched_fence_ops_scheduled = {
	.get_driver_name = drm_sched_fence_get_driver_name,
	.get_timeline_name = drm_sched_fence_get_timeline_name,
	.release = drm_sched_fence_release_scheduled,
};

static const struct dma_fence_ops drm_sched_fence_ops_finished = {
	.get_driver_name = drm_sched_fence_get_driver_name,
	.get_timeline_name = drm_sched_fence_get_timeline_name,
	.release = drm_sched_fence_release_finished,
};

/**
 * to_drm_sched_fence - cast a dma_fence to a drm_sched_fence
 *
 * @f: fence to cast
 *
 * Returns the drm_sched_fence embedding @f, or NULL if @f is neither a
 * scheduled nor a finished fence of the scheduler.
 */
struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f)
{
	if (f->ops == &drm_sched_fence_ops_scheduled)
		return container_of(f, struct drm_sched_fence, scheduled);

	if (f->ops == &drm_sched_fence_ops_finished)
		return container_of(f, struct drm_sched_fence, finished);

	return NULL;
}
EXPORT_SYMBOL(to_drm_sched_fence);
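
/*
 * A minimal usage sketch: because to_drm_sched_fence() returns NULL for
 * foreign fences, it doubles as a type check. Assuming f is some
 * struct dma_fence a driver received (inspect_owner() is a hypothetical
 * helper):
 *
 *	struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
 *
 *	if (s_fence)
 *		inspect_owner(s_fence->owner);
 */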

/**
 * drm_sched_fence_create - create a new scheduler fence
 *
 * @entity: entity the fence belongs to
 * @owner: opaque pointer identifying the job's owner
 *
 * Allocates a scheduler fence from the slab cache and initializes the
 * embedded scheduled and finished fences. Both share the same sequence
 * number but live on adjacent fence contexts: the scheduled fence on
 * @entity's base context, the finished fence on the following one.
 *
 * Returns the new fence, or NULL on allocation failure.
 */
struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity,
					       void *owner)
{
	struct drm_sched_fence *fence;
	unsigned int seq;

	fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
	if (!fence)
		return NULL;

	fence->owner = owner;
	fence->sched = entity->rq->sched;
	spin_lock_init(&fence->lock);

	seq = atomic_inc_return(&entity->fence_seq);
	dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled,
		       &fence->lock, entity->fence_context, seq);
	dma_fence_init(&fence->finished, &drm_sched_fence_ops_finished,
		       &fence->lock, entity->fence_context + 1, seq);

	return fence;
}
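
/*
 * A minimal caller sketch, assuming entity has already been initialized
 * and assigned to a run queue (in the scheduler core itself the caller
 * is drm_sched_job_init()):
 *
 *	job->s_fence = drm_sched_fence_create(entity, job->owner);
 *	if (!job->s_fence)
 *		return -ENOMEM;
 */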

module_init(drm_sched_fence_slab_init);
module_exit(drm_sched_fence_slab_fini);

MODULE_DESCRIPTION("DRM GPU scheduler");
MODULE_LICENSE("GPL and additional rights");