// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013-2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-fence.h>

#include "msm_drv.h"
#include "msm_fence.h"

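/*
 * Allocate a new fence "timeline" (typically one per ring).  Seqno
 * comparisons within a context use wrapping 32-bit arithmetic, so
 * last_fence and completed_fence only need to advance monotonically
 * per context.
 */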
struct msm_fence_context *
msm_fence_context_alloc(struct drm_device *dev, const char *name)
{
	struct msm_fence_context *fctx;

	fctx = kzalloc(sizeof(*fctx), GFP_KERNEL);
	if (!fctx)
		return ERR_PTR(-ENOMEM);

	fctx->dev = dev;
	strscpy(fctx->name, name, sizeof(fctx->name));
	fctx->context = dma_fence_context_alloc(1);
	init_waitqueue_head(&fctx->event);
	spin_lock_init(&fctx->spinlock);

	return fctx;
}

void msm_fence_context_free(struct msm_fence_context *fctx)
{
	kfree(fctx);
}

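/*
 * Compare seqno's in a wraparound-safe way: the signed difference is
 * >= 0 iff @fence is at or behind the last completed seqno.
 */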
static inline bool fence_completed(struct msm_fence_context *fctx, uint32_t fence)
{
	return (int32_t)(fctx->completed_fence - fence) >= 0;
}

/* legacy path for WAIT_FENCE ioctl: */
int msm_wait_fence(struct msm_fence_context *fctx, uint32_t fence,
		ktime_t *timeout, bool interruptible)
{
	int ret;

	if (fence > fctx->last_fence) {
		DRM_ERROR_RATELIMITED("%s: waiting on invalid fence: %u (of %u)\n",
				fctx->name, fence, fctx->last_fence);
		return -EINVAL;
	}

	if (!timeout) {
		/* no-wait: */
		ret = fence_completed(fctx, fence) ? 0 : -EBUSY;
	} else {
		unsigned long remaining_jiffies = timeout_to_jiffies(timeout);

		if (interruptible)
			ret = wait_event_interruptible_timeout(fctx->event,
				fence_completed(fctx, fence),
				remaining_jiffies);
		else
			ret = wait_event_timeout(fctx->event,
				fence_completed(fctx, fence),
				remaining_jiffies);

		/*
		 * wait_event_*_timeout() returns 0 on timeout, -ERESTARTSYS
		 * if interrupted by a signal, or the remaining jiffies on
		 * success:
		 */
		if (ret == 0) {
			DBG("timeout waiting for fence: %u (completed: %u)",
					fence, fctx->completed_fence);
			ret = -ETIMEDOUT;
		} else if (ret != -ERESTARTSYS) {
			ret = 0;
		}
	}

	return ret;
}

/* called from workqueue */
void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
{
	spin_lock(&fctx->spinlock);
	fctx->completed_fence = max(fence, fctx->completed_fence);
	spin_unlock(&fctx->spinlock);

	wake_up_all(&fctx->event);
}

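/*
 * dma_fence backed by a msm_fence_context: the .signaled callback below
 * simply checks the fence's seqno against the context's completed_fence.
 */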
struct msm_fence {
	struct dma_fence base;
	struct msm_fence_context *fctx;
};

static inline struct msm_fence *to_msm_fence(struct dma_fence *fence)
{
	return container_of(fence, struct msm_fence, base);
}

static const char *msm_fence_get_driver_name(struct dma_fence *fence)
{
	return "msm";
}

static const char *msm_fence_get_timeline_name(struct dma_fence *fence)
{
	struct msm_fence *f = to_msm_fence(fence);
	return f->fctx->name;
}

static bool msm_fence_signaled(struct dma_fence *fence)
{
	struct msm_fence *f = to_msm_fence(fence);
	return fence_completed(f->fctx, f->base.seqno);
}

static const struct dma_fence_ops msm_fence_ops = {
	.get_driver_name = msm_fence_get_driver_name,
	.get_timeline_name = msm_fence_get_timeline_name,
	.signaled = msm_fence_signaled,
};

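/*
 * Allocate a fence on the context's timeline.  The fence shares the
 * context's spinlock and takes the next seqno (++fctx->last_fence); the
 * caller owns the reference from dma_fence_init() and is expected to
 * dma_fence_put() it when done.  Roughly (illustrative only, names are
 * hypothetical):
 *
 *	fence = msm_fence_alloc(fctx);
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *	... hand off to the hw; once it completes ...
 *	msm_update_fence(fctx, fence->seqno);
 *	dma_fence_put(fence);
 */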
struct dma_fence *
msm_fence_alloc(struct msm_fence_context *fctx)
{
	struct msm_fence *f;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return ERR_PTR(-ENOMEM);

	f->fctx = fctx;

	dma_fence_init(&f->base, &msm_fence_ops, &fctx->spinlock,
		       fctx->context, ++fctx->last_fence);

	return &f->base;
}