// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2010-2016, 2018-2021 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

#include <mali_kbase.h>
#include <mali_kbase_debug.h>
#include <tl/mali_kbase_tracepoints.h>
#include <mali_linux_trace.h>

static struct base_jd_udata kbase_event_process(struct kbase_context *kctx,
						struct kbase_jd_atom *katom)
{
	struct base_jd_udata data;
	struct kbase_device *kbdev;

	lockdep_assert_held(&kctx->jctx.lock);

	KBASE_DEBUG_ASSERT(kctx != NULL);
	KBASE_DEBUG_ASSERT(katom != NULL);
	KBASE_DEBUG_ASSERT(katom->status == KBASE_JD_ATOM_STATE_COMPLETED);

	kbdev = kctx->kbdev;
	data = katom->udata;

	KBASE_TLSTREAM_TL_NRET_ATOM_CTX(kbdev, katom, kctx);
	KBASE_TLSTREAM_TL_DEL_ATOM(kbdev, katom);

	katom->status = KBASE_JD_ATOM_STATE_UNUSED;
	dev_dbg(kbdev->dev, "Atom %pK status to unused\n", (void *)katom);
	wake_up(&katom->completed);

	return data;
}

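/**
 * kbase_event_dequeue - Dequeue the next event for a context
 * @ctx:    Context pointer
 * @uevent: User-side event structure to fill in
 *
 * If the event list is empty and the event system has been closed, a
 * BASE_JD_EVENT_DRV_TERMINATED event is generated on the fly.
 *
 * Return: 0 if an event was written to @uevent, -1 if no event was pending
 */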
int kbase_event_dequeue(struct kbase_context *ctx, struct base_jd_event_v2 *uevent)
{
	struct kbase_jd_atom *atom;

	KBASE_DEBUG_ASSERT(ctx);

	mutex_lock(&ctx->event_mutex);

	if (list_empty(&ctx->event_list)) {
		if (!atomic_read(&ctx->event_closed)) {
			mutex_unlock(&ctx->event_mutex);
			return -1;
		}

		/* generate the BASE_JD_EVENT_DRV_TERMINATED message on the fly */
		mutex_unlock(&ctx->event_mutex);
		uevent->event_code = BASE_JD_EVENT_DRV_TERMINATED;
		memset(&uevent->udata, 0, sizeof(uevent->udata));
		dev_dbg(ctx->kbdev->dev,
				"event system closed, returning BASE_JD_EVENT_DRV_TERMINATED(0x%X)\n",
				BASE_JD_EVENT_DRV_TERMINATED);
		return 0;
	}

	/* normal event processing */
	atomic_dec(&ctx->event_count);
	atom = list_entry(ctx->event_list.next, struct kbase_jd_atom, dep_item[0]);
	list_del(ctx->event_list.next);

	mutex_unlock(&ctx->event_mutex);

	dev_dbg(ctx->kbdev->dev, "event dequeuing %pK\n", (void *)atom);
	uevent->event_code = atom->event_code;

	uevent->atom_number = (atom - ctx->jctx.atoms);

	if (atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES)
		kbase_jd_free_external_resources(atom);

	mutex_lock(&ctx->jctx.lock);
	uevent->udata = kbase_event_process(ctx, atom);
	mutex_unlock(&ctx->jctx.lock);

	return 0;
}

KBASE_EXPORT_TEST_API(kbase_event_dequeue);

/**
 * kbase_event_process_noreport_worker - Worker for processing atoms that do
 *                                       not return an event but do have
 *                                       external resources
 * @data:  Work structure
 */
static void kbase_event_process_noreport_worker(struct work_struct *data)
{
	struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom,
			work);
	struct kbase_context *kctx = katom->kctx;

	if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES)
		kbase_jd_free_external_resources(katom);

	mutex_lock(&kctx->jctx.lock);
	kbase_event_process(kctx, katom);
	mutex_unlock(&kctx->jctx.lock);
}

/**
 * kbase_event_process_noreport - Process atoms that do not return an event
 * @kctx:  Context pointer
 * @katom: Atom to be processed
 *
 * Atoms that do not have external resources will be processed immediately.
 * Atoms that do have external resources will be processed on a workqueue, in
 * order to avoid locking issues.
 */
static void kbase_event_process_noreport(struct kbase_context *kctx,
		struct kbase_jd_atom *katom)
{
	if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) {
		INIT_WORK(&katom->work, kbase_event_process_noreport_worker);
		queue_work(kctx->event_workq, &katom->work);
	} else {
		kbase_event_process(kctx, katom);
	}
}

/**
 * kbase_event_coalesce - Move pending events to the main event list
 * @kctx:  Context pointer
 *
 * kctx->event_list and kctx->event_coalesce_count must be protected
 * by a lock unless this is the last thread using them (because the
 * context is being terminated and the lock is about to be destroyed).
 *
 * Return: The number of pending events moved to the main event list
 */
static int kbase_event_coalesce(struct kbase_context *kctx)
{
	const int event_count = kctx->event_coalesce_count;

	/* Join the list of pending events onto the tail of the main list
	 * and reset it
	 */
	list_splice_tail_init(&kctx->event_coalesce_list, &kctx->event_list);
	kctx->event_coalesce_count = 0;

	/* Return the number of events moved */
	return event_count;
}

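/**
 * kbase_event_post - Post a completed atom's event to its context
 * @ctx:  Context pointer
 * @atom: Completed atom
 *
 * Depending on the atom's core requirements, the event is suppressed,
 * coalesced until a later event is posted, or reported immediately.
 */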
void kbase_event_post(struct kbase_context *ctx, struct kbase_jd_atom *atom)
{
	struct kbase_device *kbdev = ctx->kbdev;

	dev_dbg(kbdev->dev, "Posting event for atom %pK\n", (void *)atom);

	if (WARN_ON(atom->status != KBASE_JD_ATOM_STATE_COMPLETED)) {
		dev_warn(kbdev->dev,
				"%s: Atom %d (%pK) not completed (status %d)\n",
				__func__,
				kbase_jd_atom_id(atom->kctx, atom),
				atom->kctx,
				atom->status);
		return;
	}

	if (atom->core_req & BASE_JD_REQ_EVENT_ONLY_ON_FAILURE) {
		if (atom->event_code == BASE_JD_EVENT_DONE) {
			dev_dbg(kbdev->dev, "Suppressing event (atom done)\n");
			kbase_event_process_noreport(ctx, atom);
			return;
		}
	}

	if (atom->core_req & BASEP_JD_REQ_EVENT_NEVER) {
		dev_dbg(kbdev->dev, "Suppressing event (never)\n");
		kbase_event_process_noreport(ctx, atom);
		return;
	}
	KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE(kbdev, atom, TL_ATOM_STATE_POSTED);
	if (atom->core_req & BASE_JD_REQ_EVENT_COALESCE) {
		/* Don't report the event until other event(s) have completed */
		dev_dbg(kbdev->dev, "Deferring event (coalesced)\n");
		mutex_lock(&ctx->event_mutex);
		list_add_tail(&atom->dep_item[0], &ctx->event_coalesce_list);
		++ctx->event_coalesce_count;
		mutex_unlock(&ctx->event_mutex);
	} else {
		/* Report the event and any pending events now */
		int event_count = 1;

		mutex_lock(&ctx->event_mutex);
		event_count += kbase_event_coalesce(ctx);
		list_add_tail(&atom->dep_item[0], &ctx->event_list);
		atomic_add(event_count, &ctx->event_count);
		mutex_unlock(&ctx->event_mutex);
		dev_dbg(kbdev->dev, "Reporting %d events\n", event_count);

		kbase_event_wakeup(ctx);

		/* Post-completion latency */
		trace_sysgraph(SGR_POST, ctx->id,
					kbase_jd_atom_id(ctx, atom));
	}
}
KBASE_EXPORT_TEST_API(kbase_event_post);

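/**
 * kbase_event_close - Mark the event system of a context as closed
 * @kctx: Context pointer
 *
 * Waiters are woken so that they can observe the closed state.
 */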
void kbase_event_close(struct kbase_context *kctx)
{
	mutex_lock(&kctx->event_mutex);
	atomic_set(&kctx->event_closed, true);
	mutex_unlock(&kctx->event_mutex);
	kbase_event_wakeup(kctx);
}

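/**
 * kbase_event_init - Initialize the event system of a context
 * @kctx: Context pointer
 *
 * Return: 0 on success, -EINVAL if the event workqueue cannot be allocated
 */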
int kbase_event_init(struct kbase_context *kctx)
{
	KBASE_DEBUG_ASSERT(kctx);

	INIT_LIST_HEAD(&kctx->event_list);
	INIT_LIST_HEAD(&kctx->event_coalesce_list);
	mutex_init(&kctx->event_mutex);
	kctx->event_coalesce_count = 0;
	kctx->event_workq = alloc_workqueue("kbase_event", WQ_MEM_RECLAIM, 1);

	if (kctx->event_workq == NULL)
		return -EINVAL;

	return 0;
}

KBASE_EXPORT_TEST_API(kbase_event_init);

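/**
 * kbase_event_cleanup - Tear down the event system of a context
 * @kctx: Context pointer
 *
 * Flushes and destroys the event workqueue, then dequeues any remaining
 * events so that their atoms are cleaned up.
 */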
void kbase_event_cleanup(struct kbase_context *kctx)
{
	int event_count;

	KBASE_DEBUG_ASSERT(kctx);
	KBASE_DEBUG_ASSERT(kctx->event_workq);

	flush_workqueue(kctx->event_workq);
	destroy_workqueue(kctx->event_workq);

	/* We use kbase_event_dequeue to remove the remaining events as that
	 * deals with all the cleanup needed for the atoms.
	 *
	 * Note: use of kctx->event_list without a lock is safe because this
	 * must be the last thread using it (the context is being terminated,
	 * so the lock is about to be destroyed).
	 */
	event_count = kbase_event_coalesce(kctx);
	atomic_add(event_count, &kctx->event_count);

	while (!list_empty(&kctx->event_list)) {
		struct base_jd_event_v2 event;

		kbase_event_dequeue(kctx, &event);
	}
}

KBASE_EXPORT_TEST_API(kbase_event_cleanup);