/*
 *
 * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */


#include <mali_kbase.h>
#include <mali_kbase_debug.h>
#include <mali_kbase_tlstream.h>

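/**
 * kbase_event_process - Finalize a completed atom and return its user data
 * @kctx:  Context that owns the atom
 * @katom: Completed atom to finalize
 *
 * The caller must hold kctx->jctx.lock. The atom is removed from the timeline
 * and tlstream bookkeeping, marked as unused and any waiters on
 * katom->completed are woken.
 *
 * Return: The user data that was attached to the atom.
 */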
static struct base_jd_udata kbase_event_process(struct kbase_context *kctx, struct kbase_jd_atom *katom)
{
	struct base_jd_udata data;

	lockdep_assert_held(&kctx->jctx.lock);

	KBASE_DEBUG_ASSERT(kctx != NULL);
	KBASE_DEBUG_ASSERT(katom != NULL);
	KBASE_DEBUG_ASSERT(katom->status == KBASE_JD_ATOM_STATE_COMPLETED);

	data = katom->udata;

	KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx, atomic_sub_return(1, &kctx->timeline.jd_atoms_in_flight));

	KBASE_TLSTREAM_TL_NRET_ATOM_CTX(katom, kctx);
	KBASE_TLSTREAM_TL_DEL_ATOM(katom);

	katom->status = KBASE_JD_ATOM_STATE_UNUSED;

	wake_up(&katom->completed);

	return data;
}

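/**
 * kbase_event_pending - Check whether kbase_event_dequeue() would return an
 *                       event
 * @ctx: Context to query
 *
 * Return: Non-zero if events are queued or the event queue has been closed,
 *         zero otherwise.
 */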
int kbase_event_pending(struct kbase_context *ctx)
{
	KBASE_DEBUG_ASSERT(ctx);

	return (atomic_read(&ctx->event_count) != 0) ||
			(atomic_read(&ctx->event_closed) != 0);
}

KBASE_EXPORT_TEST_API(kbase_event_pending);

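/**
 * kbase_event_dequeue - Remove the oldest event from the context's event list
 * @ctx:    Context to dequeue from
 * @uevent: User-visible event structure to fill in
 *
 * If the list is empty and the event queue has been closed, a
 * BASE_JD_EVENT_DRV_TERMINATED event is generated instead. The dequeued atom
 * is finalized via kbase_event_process().
 *
 * Return: 0 if an event was written to @uevent, -1 if no event was available.
 */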
int kbase_event_dequeue(struct kbase_context *ctx, struct base_jd_event_v2 *uevent)
{
	struct kbase_jd_atom *atom;

	KBASE_DEBUG_ASSERT(ctx);

	mutex_lock(&ctx->event_mutex);

	if (list_empty(&ctx->event_list)) {
		if (!atomic_read(&ctx->event_closed)) {
			mutex_unlock(&ctx->event_mutex);
			return -1;
		}

		/* generate the BASE_JD_EVENT_DRV_TERMINATED message on the fly */
		mutex_unlock(&ctx->event_mutex);
		uevent->event_code = BASE_JD_EVENT_DRV_TERMINATED;
		memset(&uevent->udata, 0, sizeof(uevent->udata));
		dev_dbg(ctx->kbdev->dev,
				"event system closed, returning BASE_JD_EVENT_DRV_TERMINATED(0x%X)\n",
				BASE_JD_EVENT_DRV_TERMINATED);
		return 0;
	}

	/* normal event processing */
	atomic_dec(&ctx->event_count);
	atom = list_entry(ctx->event_list.next, struct kbase_jd_atom, dep_item[0]);
	list_del(ctx->event_list.next);

	mutex_unlock(&ctx->event_mutex);

	dev_dbg(ctx->kbdev->dev, "event dequeuing %p\n", (void *)atom);
	uevent->event_code = atom->event_code;
	uevent->atom_number = (atom - ctx->jctx.atoms);

	if (atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES)
		kbase_jd_free_external_resources(atom);

	mutex_lock(&ctx->jctx.lock);
	uevent->udata = kbase_event_process(ctx, atom);
	mutex_unlock(&ctx->jctx.lock);

	return 0;
}

KBASE_EXPORT_TEST_API(kbase_event_dequeue);

/**
 * kbase_event_process_noreport_worker - Worker for processing atoms that do
 *                                       not return an event but do have
 *                                       external resources
 * @data: Work structure
 */
static void kbase_event_process_noreport_worker(struct work_struct *data)
{
	struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom,
			work);
	struct kbase_context *kctx = katom->kctx;

	if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES)
		kbase_jd_free_external_resources(katom);

	mutex_lock(&kctx->jctx.lock);
	kbase_event_process(kctx, katom);
	mutex_unlock(&kctx->jctx.lock);
}

/**
 * kbase_event_process_noreport - Process atoms that do not return an event
 * @kctx:  Context pointer
 * @katom: Atom to be processed
 *
 * Atoms that do not have external resources will be processed immediately.
 * Atoms that do have external resources will be processed on a workqueue, in
 * order to avoid locking issues.
 */
static void kbase_event_process_noreport(struct kbase_context *kctx,
		struct kbase_jd_atom *katom)
{
	if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) {
		INIT_WORK(&katom->work, kbase_event_process_noreport_worker);
		queue_work(kctx->event_workq, &katom->work);
	} else {
		kbase_event_process(kctx, katom);
	}
}

/**
 * kbase_event_coalesce - Move pending events to the main event list
 * @kctx: Context pointer
 *
 * kctx->event_list and kctx->event_coalesce_count must be protected
 * by a lock unless this is the last thread using them
 * (and the lock is about to be destroyed).
 *
 * Return: The number of pending events moved to the main event list
 */
static int kbase_event_coalesce(struct kbase_context *kctx)
{
	const int event_count = kctx->event_coalesce_count;

	/*
	 * Join the list of pending events onto the tail of the main list
	 * and reset it.
	 */
	list_splice_tail_init(&kctx->event_coalesce_list, &kctx->event_list);
	kctx->event_coalesce_count = 0;

	/* Return the number of events moved */
	return event_count;
}

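/**
 * kbase_event_post - Post a completion event for an atom
 * @ctx:  Context that owns the atom
 * @atom: Atom that has completed
 *
 * Atoms flagged BASE_JD_REQ_EVENT_ONLY_ON_FAILURE that completed successfully,
 * and atoms flagged BASEP_JD_REQ_EVENT_NEVER, are processed without reporting
 * an event. Atoms flagged BASE_JD_REQ_EVENT_COALESCE are parked on the
 * coalesce list until the next non-coalesced event flushes them to the main
 * event list.
 */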
void kbase_event_post(struct kbase_context *ctx, struct kbase_jd_atom *atom)
{
	if (atom->core_req & BASE_JD_REQ_EVENT_ONLY_ON_FAILURE) {
		if (atom->event_code == BASE_JD_EVENT_DONE) {
			/* Don't report the event */
			kbase_event_process_noreport(ctx, atom);
			return;
		}
	}

	if (atom->core_req & BASEP_JD_REQ_EVENT_NEVER) {
		/* Don't report the event */
		kbase_event_process_noreport(ctx, atom);
		return;
	}

	KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE(atom, TL_ATOM_STATE_POSTED);

	if (atom->core_req & BASE_JD_REQ_EVENT_COALESCE) {
		/* Don't report the event until other event(s) have completed */
		mutex_lock(&ctx->event_mutex);
		list_add_tail(&atom->dep_item[0], &ctx->event_coalesce_list);
		++ctx->event_coalesce_count;
		mutex_unlock(&ctx->event_mutex);
	} else {
		/* Report the event and any pending events now */
		int event_count = 1;

		mutex_lock(&ctx->event_mutex);
		event_count += kbase_event_coalesce(ctx);
		list_add_tail(&atom->dep_item[0], &ctx->event_list);
		atomic_add(event_count, &ctx->event_count);
		mutex_unlock(&ctx->event_mutex);

		kbase_event_wakeup(ctx);
	}
}
KBASE_EXPORT_TEST_API(kbase_event_post);

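/**
 * kbase_event_close - Close the context's event queue
 * @kctx: Context pointer
 *
 * After this call kbase_event_dequeue() reports BASE_JD_EVENT_DRV_TERMINATED
 * once the queued events have been drained, and any waiters are woken.
 */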
void kbase_event_close(struct kbase_context *kctx)
{
	mutex_lock(&kctx->event_mutex);
	atomic_set(&kctx->event_closed, true);
	mutex_unlock(&kctx->event_mutex);
	kbase_event_wakeup(kctx);
}

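/**
 * kbase_event_init - Initialize the event machinery for a context
 * @kctx: Context pointer
 *
 * Sets up the event lists, mutex and counters, and allocates the workqueue
 * used by kbase_event_process_noreport() for atoms with external resources.
 *
 * Return: 0 on success, -EINVAL if the workqueue could not be allocated.
 */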
int kbase_event_init(struct kbase_context *kctx)
{
	KBASE_DEBUG_ASSERT(kctx);

	INIT_LIST_HEAD(&kctx->event_list);
	INIT_LIST_HEAD(&kctx->event_coalesce_list);
	mutex_init(&kctx->event_mutex);
	atomic_set(&kctx->event_count, 0);
	kctx->event_coalesce_count = 0;
	atomic_set(&kctx->event_closed, false);
	kctx->event_workq = alloc_workqueue("kbase_event", WQ_MEM_RECLAIM, 1);

	if (!kctx->event_workq)
		return -EINVAL;

	return 0;
}

KBASE_EXPORT_TEST_API(kbase_event_init);

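/**
 * kbase_event_cleanup - Tear down the event machinery for a context
 * @kctx: Context pointer
 *
 * Flushes and destroys the event workqueue, then drains any remaining events
 * through kbase_event_dequeue() so that their atoms are cleaned up properly.
 */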
void kbase_event_cleanup(struct kbase_context *kctx)
{
	int event_count;

	KBASE_DEBUG_ASSERT(kctx);
	KBASE_DEBUG_ASSERT(kctx->event_workq);

	flush_workqueue(kctx->event_workq);
	destroy_workqueue(kctx->event_workq);

	/*
	 * We use kbase_event_dequeue to remove the remaining events as that
	 * deals with all the cleanup needed for the atoms.
	 *
	 * Note: use of kctx->event_list without a lock is safe because this
	 * must be the last thread using it (the context, and with it the
	 * lock, is about to be destroyed).
	 */
	event_count = kbase_event_coalesce(kctx);
	atomic_add(event_count, &kctx->event_count);

	while (!list_empty(&kctx->event_list)) {
		struct base_jd_event_v2 event;

		kbase_event_dequeue(kctx, &event);
	}
}

KBASE_EXPORT_TEST_API(kbase_event_cleanup);