/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Fence mechanism for dma-buf to allow for asynchronous dma access
 *
 * Copyright (C) 2012 Canonical Ltd
 * Copyright (C) 2012 Texas Instruments
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
 */

#ifndef __LINUX_DMA_FENCE_H
#define __LINUX_DMA_FENCE_H

#include <linux/err.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/bitops.h>
#include <linux/kref.h>
#include <linux/sched.h>
#include <linux/printk.h>
#include <linux/rcupdate.h>

struct dma_fence;
struct dma_fence_ops;
struct dma_fence_cb;

/**
 * struct dma_fence - software synchronization primitive
 * @refcount: refcount for this fence
 * @ops: dma_fence_ops associated with this fence
 * @rcu: used for releasing fence with kfree_rcu
 * @cb_list: list of all callbacks to call
 * @lock: spin_lock_irqsave used for locking
 * @context: execution context this fence belongs to, returned by
 *           dma_fence_context_alloc()
 * @seqno: the sequence number of this fence inside the execution context,
 * can be compared to decide which fence would be signaled later.
 * @flags: A mask of DMA_FENCE_FLAG_* defined below
 * @timestamp: Timestamp when the fence was signaled.
 * @error: Optional, only valid if < 0, must be set before calling
 * dma_fence_signal, indicates that the fence has completed with an error.
 *
 * The flags member must be manipulated and read using the appropriate
 * atomic ops (bit_*), so taking the spinlock will not be needed most
 * of the time.
 *
 * DMA_FENCE_FLAG_SIGNALED_BIT - fence is already signaled
 * DMA_FENCE_FLAG_TIMESTAMP_BIT - timestamp recorded for fence signaling
 * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called
 * DMA_FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the
 * implementer of the fence for its own purposes. Can be used in different
 * ways by different fence implementers, so do not rely on this.
 *
 * Since atomic bitops are used, DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT alone does
 * not guarantee that enable_signaling was actually called. In particular, if
 * dma_fence_signal was called right before this bit was set, it may already
 * have set DMA_FENCE_FLAG_SIGNALED_BIT before enable_signaling was called.
 * Adding a check for DMA_FENCE_FLAG_SIGNALED_BIT after setting
 * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that
 * after dma_fence_signal was called, any enable_signaling call will either
 * have completed, or never be made at all.
 */
struct dma_fence {
    spinlock_t *lock;
    const struct dma_fence_ops *ops;
    /*
     * We clear the callback list on kref_put so that by the time we
     * release the fence it is unused. No one should be adding to the
     * cb_list that they don't themselves hold a reference for.
     *
     * The lifetime of the timestamp is similarly tied to both the
     * rcu freelist and the cb_list. The timestamp is only set upon
     * signaling while simultaneously notifying the cb_list. Ergo, we
     * only use either the cb_list or the timestamp. Upon destruction,
     * neither are accessible, and so we can use the rcu. This means
     * that the cb_list is *only* valid until the signal bit is set,
     * and to read either you *must* hold a reference to the fence,
     * and not just the rcu_read_lock.
     *
     * Listed in chronological order.
     */
    union {
        struct list_head cb_list;
        /* @cb_list replaced by @timestamp on dma_fence_signal() */
        ktime_t timestamp;
        /* @timestamp replaced by @rcu on dma_fence_release() */
        struct rcu_head rcu;
    };
    u64 context;
    u64 seqno;
    unsigned long flags;
    struct kref refcount;
    int error;
};

enum dma_fence_flag_bits {
    DMA_FENCE_FLAG_SIGNALED_BIT,
    DMA_FENCE_FLAG_TIMESTAMP_BIT,
    DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
    DMA_FENCE_FLAG_USER_BITS, /* must always be last member */
};

typedef void (*dma_fence_func_t)(struct dma_fence *fence, struct dma_fence_cb *cb);

/**
 * struct dma_fence_cb - callback for dma_fence_add_callback()
 * @node: used by dma_fence_add_callback() to append this struct to fence::cb_list
 * @func: dma_fence_func_t to call
 *
 * This struct will be initialized by dma_fence_add_callback(), additional
 * data can be passed along by embedding dma_fence_cb in another struct.
 */
struct dma_fence_cb {
    struct list_head node;
    dma_fence_func_t func;
};
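
/*
 * A minimal sketch of passing private data to a fence callback by embedding
 * struct dma_fence_cb in a larger struct and recovering it with
 * container_of() in the callback. The names my_waiter and my_cb are purely
 * illustrative, not part of this API:
 *
 *     struct my_waiter {
 *         struct dma_fence_cb cb;
 *         struct completion done;
 *     };
 *
 *     static void my_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 *     {
 *         struct my_waiter *w = container_of(cb, struct my_waiter, cb);
 *
 *         complete(&w->done);
 *     }
 */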

/**
 * struct dma_fence_ops - operations implemented for fence
 *
 */
struct dma_fence_ops {
    /**
     * @use_64bit_seqno:
     *
     * True if this dma_fence implementation uses 64bit seqno, false
     * otherwise.
     */
    bool use_64bit_seqno;

    /**
     * @get_driver_name:
     *
     * Returns the driver name. This is a callback to allow drivers to
     * compute the name at runtime, without having to store it permanently
     * for each fence or build a cache of some sort.
     *
     * This callback is mandatory.
     */
    const char *(*get_driver_name)(struct dma_fence *fence);

    /**
     * @get_timeline_name:
     *
     * Return the name of the context this fence belongs to. This is a
     * callback to allow drivers to compute the name at runtime, without
     * having to store it permanently for each fence or build a cache of
     * some sort.
     *
     * This callback is mandatory.
     */
    const char *(*get_timeline_name)(struct dma_fence *fence);

    /**
     * @enable_signaling:
     *
     * Enable software signaling of fence.
     *
     * For fence implementations that have the capability for hw->hw
     * signaling, they can implement this op to enable the necessary
     * interrupts, or insert commands into cmdstream, etc, to avoid these
     * costly operations for the common case where only hw->hw
     * synchronization is required.  This is called in the first
     * dma_fence_wait() or dma_fence_add_callback() path to let the fence
     * implementation know that there is another driver waiting on the
     * signal (ie. hw->sw case).
     *
     * This function can be called from atomic context, but not
     * from irq context, so normal spinlocks can be used.
     *
     * A return value of false indicates the fence already passed,
     * or some failure occurred that made it impossible to enable
     * signaling. True indicates successful enabling.
     *
     * &dma_fence.error may be set in enable_signaling, but only when false
     * is returned.
     *
     * Since many implementations can call dma_fence_signal() even before
     * @enable_signaling has been called, there's a race window where the
     * dma_fence_signal() might result in the final fence reference being
     * released and its memory freed. To avoid this, implementations of this
     * callback should grab their own reference using dma_fence_get(), to be
     * released when the fence is signalled (through e.g. the interrupt
     * handler).
     *
     * This callback is optional. If this callback is not present, then the
     * driver must always have signaling enabled.
     */
    bool (*enable_signaling)(struct dma_fence *fence);

    /**
     * @signaled:
     *
     * Peek whether the fence is signaled, as a fastpath optimization for
     * e.g. dma_fence_wait() or dma_fence_add_callback(). Note that this
     * callback does not need to make any guarantees beyond the fact that a
     * fence once indicated as signaled must always return true from this
     * callback. This callback may return false even if the fence has
     * completed already, in which case the information hasn't propagated
     * through the system yet. See also dma_fence_is_signaled().
     *
     * May set &dma_fence.error if returning true.
     *
     * This callback is optional.
     */
    bool (*signaled)(struct dma_fence *fence);

    /**
     * @wait:
     *
     * Custom wait implementation, defaults to dma_fence_default_wait() if
     * not set.
     *
     * The dma_fence_default_wait implementation should work for any fence, as long
     * as @enable_signaling works correctly. This hook allows drivers to
     * have an optimized version for the case where a process context is
     * already available, e.g. if @enable_signaling for the general case
     * needs to set up a worker thread.
     *
     * Must return -ERESTARTSYS if intr is true and the wait was interrupted,
     * the remaining jiffies if the fence signaled, or 0 if the wait timed out.
     * Custom implementations can also return other error values, which should
     * be treated as if the fence is signaled. For example a hardware lockup
     * could be reported like that.
     *
     * This callback is optional.
     */
    signed long (*wait)(struct dma_fence *fence, bool intr, signed long timeout);

    /**
     * @release:
     *
     * Called on destruction of fence to release additional resources.
     * Can be called from irq context.  This callback is optional. If it is
     * NULL, then dma_fence_free() is instead called as the default
     * implementation.
     */
    void (*release)(struct dma_fence *fence);

    /**
     * @fence_value_str:
     *
     * Callback to fill in free-form debug info specific to this fence, like
     * the sequence number.
     *
     * This callback is optional.
     */
    void (*fence_value_str)(struct dma_fence *fence, char *str, int size);

    /**
     * @timeline_value_str:
     *
     * Fills in the current value of the timeline as a string, like the
     * sequence number. Note that the specific fence passed to this function
     * should not matter, drivers should only use it to look up the
     * corresponding timeline structures.
     */
    void (*timeline_value_str)(struct dma_fence *fence, char *str, int size);
};
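
/*
 * A minimal sketch of a driver-side dma_fence_ops table. Only the two
 * mandatory name callbacks are filled in; absent callbacks behave as
 * described above. foo_fence_ops and the "foo" strings are hypothetical
 * driver names, not part of this header:
 *
 *     static const char *foo_get_driver_name(struct dma_fence *fence)
 *     {
 *         return "foo";
 *     }
 *
 *     static const char *foo_get_timeline_name(struct dma_fence *fence)
 *     {
 *         return "foo-ring0";
 *     }
 *
 *     static const struct dma_fence_ops foo_fence_ops = {
 *         .get_driver_name = foo_get_driver_name,
 *         .get_timeline_name = foo_get_timeline_name,
 *     };
 */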

void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops, spinlock_t *lock, u64 context, u64 seqno);

void dma_fence_release(struct kref *kref);
void dma_fence_free(struct dma_fence *fence);

/**
 * dma_fence_put - decreases refcount of the fence
 * @fence: fence to reduce refcount of
 */
static inline void dma_fence_put(struct dma_fence *fence)
{
    if (fence) {
        kref_put(&fence->refcount, dma_fence_release);
    }
}

/**
 * dma_fence_get - increases refcount of the fence
 * @fence: fence to increase refcount of
 *
 * Returns the same fence, with refcount increased by 1.
 */
static inline struct dma_fence *dma_fence_get(struct dma_fence *fence)
{
    if (fence) {
        kref_get(&fence->refcount);
    }
    return fence;
}

/**
 * dma_fence_get_rcu - get a fence from a dma_resv_list with
 *                     rcu read lock
 * @fence: fence to increase refcount of
 *
 * Returns the fence, or NULL if no refcount could be obtained.
 */
static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence)
{
    if (kref_get_unless_zero(&fence->refcount)) {
        return fence;
    } else {
        return NULL;
    }
}

/**
 * dma_fence_get_rcu_safe  - acquire a reference to an RCU tracked fence
 * @fencep: pointer to fence to increase refcount of
 *
 * Returns the fence, or NULL if no refcount could be obtained.
 * This function handles acquiring a reference to a fence that may be
 * reallocated within the RCU grace period (such as with SLAB_TYPESAFE_BY_RCU),
 * so long as the caller is using RCU on the pointer to the fence.
 *
 * An alternative mechanism is to employ a seqlock to protect a bunch of
 * fences, such as used by struct dma_resv. When using a seqlock,
 * the seqlock must be taken before and checked after a reference to the
 * fence is acquired (as shown here).
 *
 * The caller is required to hold the RCU read lock.
 */
static inline struct dma_fence *dma_fence_get_rcu_safe(struct dma_fence __rcu **fencep)
{
    do {
        struct dma_fence *fence;

        fence = rcu_dereference(*fencep);
        if (!fence) {
            return NULL;
        }

        if (!dma_fence_get_rcu(fence)) {
            continue;
        }

        /* The atomic_inc_not_zero() inside dma_fence_get_rcu()
         * provides a full memory barrier upon success (such as now).
         * This is paired with the write barrier from assigning
         * to the __rcu protected fence pointer so that if that
         * pointer still matches the current fence, we know we
         * have successfully acquired a reference to it. If it no
         * longer matches, we are holding a reference to some other
         * reallocated pointer. This is possible if the allocator
         * is using a freelist like SLAB_TYPESAFE_BY_RCU where the
         * fence remains valid for the RCU grace period, but it
         * may be reallocated. When using such allocators, we are
         * responsible for ensuring the reference we get is to
         * the right fence, as below.
         */
        if (fence == rcu_access_pointer(*fencep)) {
            return rcu_pointer_handoff(fence);
        }

        dma_fence_put(fence);
    } while (1);
}
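
/*
 * A short sketch of looking up an RCU-protected fence pointer. The caller
 * must be inside an RCU read-side critical section while dereferencing; the
 * reference obtained remains valid after rcu_read_unlock(). "obj" and its
 * "fence" member are hypothetical here:
 *
 *     struct dma_fence *fence;
 *
 *     rcu_read_lock();
 *     fence = dma_fence_get_rcu_safe(&obj->fence);
 *     rcu_read_unlock();
 *
 *     if (fence) {
 *         ... use the fence ...
 *         dma_fence_put(fence);
 *     }
 */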

#ifdef CONFIG_LOCKDEP
bool dma_fence_begin_signalling(void);
void dma_fence_end_signalling(bool cookie);
void _dma_fence_might_wait(void);
#else
static inline bool dma_fence_begin_signalling(void)
{
    return true;
}
static inline void dma_fence_end_signalling(bool cookie)
{
}
static inline void _dma_fence_might_wait(void)
{
}
#endif
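
/*
 * dma_fence_begin_signalling()/dma_fence_end_signalling() annotate, for
 * lockdep, a critical section from which fences will eventually be signalled,
 * so that blocking operations inside it can be flagged. A minimal sketch of
 * how a signalling path might be annotated:
 *
 *     bool cookie;
 *
 *     cookie = dma_fence_begin_signalling();
 *     ... prepare and publish the work that will signal the fence ...
 *     dma_fence_signal(fence);
 *     dma_fence_end_signalling(cookie);
 */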

int dma_fence_signal(struct dma_fence *fence);
int dma_fence_signal_locked(struct dma_fence *fence);
int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp);
int dma_fence_signal_timestamp_locked(struct dma_fence *fence, ktime_t timestamp);
signed long dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout);
int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb, dma_fence_func_t func);
bool dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb);
void dma_fence_enable_sw_signaling(struct dma_fence *fence);
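
/*
 * A hedged usage sketch for dma_fence_add_callback(), reusing the
 * hypothetical struct my_waiter from the earlier example. If the fence has
 * already signalled, dma_fence_add_callback() is expected to return an error
 * instead of invoking the callback, so the caller finishes the work itself:
 *
 *     struct my_waiter w;
 *     int ret;
 *
 *     init_completion(&w.done);
 *     ret = dma_fence_add_callback(fence, &w.cb, my_cb);
 *     if (ret) {
 *         ... already signalled (or error); my_cb will not be called ...
 *         complete(&w.done);
 *     }
 */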

/**
 * dma_fence_is_signaled_locked - Return an indication if the fence
 *                                is signaled yet.
 * @fence: the fence to check
 *
 * Returns true if the fence was already signaled, false if not. Since this
 * function doesn't enable signaling, it is not guaranteed to ever return
 * true if dma_fence_add_callback(), dma_fence_wait() or
 * dma_fence_enable_sw_signaling() haven't been called before.
 *
 * This function requires &dma_fence.lock to be held.
 *
 * See also dma_fence_is_signaled().
 */
static inline bool dma_fence_is_signaled_locked(struct dma_fence *fence)
{
    if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
        return true;
    }

    if (fence->ops->signaled && fence->ops->signaled(fence)) {
        dma_fence_signal_locked(fence);
        return true;
    }

    return false;
}

/**
 * dma_fence_is_signaled - Return an indication if the fence is signaled yet.
 * @fence: the fence to check
 *
 * Returns true if the fence was already signaled, false if not. Since this
 * function doesn't enable signaling, it is not guaranteed to ever return
 * true if dma_fence_add_callback(), dma_fence_wait() or
 * dma_fence_enable_sw_signaling() haven't been called before.
 *
 * It's recommended for seqno fences to call dma_fence_signal when the
 * operation is complete; this makes it possible to prevent issues from
 * wraparound between time of issue and time of use by checking the return
 * value of this function before calling hardware-specific wait instructions.
 *
 * See also dma_fence_is_signaled_locked().
 */
static inline bool dma_fence_is_signaled(struct dma_fence *fence)
{
    if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
        return true;
    }

    if (fence->ops->signaled && fence->ops->signaled(fence)) {
        dma_fence_signal(fence);
        return true;
    }

    return false;
}

/**
 * __dma_fence_is_later - return if f1 is chronologically later than f2
 * @f1: the first fence's seqno
 * @f2: the second fence's seqno from the same context
 * @ops: dma_fence_ops associated with the seqno
 *
 * Returns true if f1 is chronologically later than f2. Both fences must be
 * from the same context, since a seqno is not common across contexts.
 */
static inline bool __dma_fence_is_later(u64 f1, u64 f2, const struct dma_fence_ops *ops)
{
    /* This is for backward compatibility with drivers which can only handle
     * 32bit sequence numbers. Use a 64bit compare when the driver says to
     * do so.
     */
    if (ops->use_64bit_seqno) {
        return f1 > f2;
    }

    return (int)(lower_32_bits(f1) - lower_32_bits(f2)) > 0;
}
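
/*
 * Worked example of the 32bit comparison above: with seqno wraparound, a
 * fence with seqno 2 issued just after one with seqno 0xfffffffe is still
 * "later". lower_32_bits(2) - lower_32_bits(0xfffffffe) wraps to 4, which is
 * positive when interpreted as a signed int, so __dma_fence_is_later()
 * returns true even though 2 < 0xfffffffe numerically. This only stays
 * correct while the two seqnos are less than 2^31 apart.
 */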

/**
 * dma_fence_is_later - return if f1 is chronologically later than f2
 * @f1: the first fence from the same context
 * @f2: the second fence from the same context
 *
 * Returns true if f1 is chronologically later than f2. Both fences must be
 * from the same context, since a seqno is not re-used across contexts.
 */
static inline bool dma_fence_is_later(struct dma_fence *f1, struct dma_fence *f2)
{
    if (WARN_ON(f1->context != f2->context)) {
        return false;
    }

    return __dma_fence_is_later(f1->seqno, f2->seqno, f1->ops);
}

/**
 * dma_fence_later - return the chronologically later fence
 * @f1:    the first fence from the same context
 * @f2:    the second fence from the same context
 *
 * Returns NULL if both fences are signaled, otherwise the fence that would be
 * signaled last. Both fences must be from the same context, since a seqno is
 * not re-used across contexts.
 */
static inline struct dma_fence *dma_fence_later(struct dma_fence *f1, struct dma_fence *f2)
{
    if (WARN_ON(f1->context != f2->context)) {
        return NULL;
    }

    /*
     * Can't check just DMA_FENCE_FLAG_SIGNALED_BIT here, it may never
     * have been set if enable_signaling wasn't called, and enabling that
     * here is overkill.
     */
    if (dma_fence_is_later(f1, f2)) {
        return dma_fence_is_signaled(f1) ? NULL : f1;
    } else {
        return dma_fence_is_signaled(f2) ? NULL : f2;
    }
}

/**
 * dma_fence_get_status_locked - returns the status upon completion
 * @fence: the dma_fence to query
 *
 * Drivers can supply an optional error status condition before they signal
 * the fence (to indicate whether the fence was completed due to an error
 * rather than success). The value of the status condition is only valid
 * if the fence has been signaled, so dma_fence_get_status_locked() first
 * checks the signal state before reporting the error status.
 *
 * Returns 0 if the fence has not yet been signaled, 1 if the fence has
 * been signaled without an error condition, or a negative error code
 * if the fence completed with an error.
 */
static inline int dma_fence_get_status_locked(struct dma_fence *fence)
{
    if (dma_fence_is_signaled_locked(fence)) {
        return fence->error ?: 1;
    } else {
        return 0;
    }
}

int dma_fence_get_status(struct dma_fence *fence);
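
/*
 * A small sketch of interpreting the tristate status described above; the
 * fence variable is assumed to come from elsewhere in the driver:
 *
 *     int status = dma_fence_get_status(fence);
 *
 *     if (status == 0)
 *         ... not signaled yet ...
 *     else if (status == 1)
 *         ... signaled successfully ...
 *     else
 *         ... signaled with an error, status is a negative errno ...
 */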

/**
 * dma_fence_set_error - flag an error condition on the fence
 * @fence: the dma_fence
 * @error: the error to store
 *
 * Drivers can supply an optional error status condition before they signal
 * the fence, to indicate that the fence was completed due to an error
 * rather than success. This must be set before signaling (so that the value
 * is visible before any waiters on the signal callback are woken). This
 * helper exists to help catch erroneous setting of &dma_fence.error.
 */
static inline void dma_fence_set_error(struct dma_fence *fence, int error)
{
    WARN_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
    WARN_ON(error >= 0 || error < -MAX_ERRNO);

    fence->error = error;
}
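
/*
 * A typical (sketched) error path: record the error first, then signal, so
 * that waiters observing the signal also see the error. The -ETIMEDOUT value
 * is just an example:
 *
 *     dma_fence_set_error(fence, -ETIMEDOUT);
 *     dma_fence_signal(fence);
 */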

signed long dma_fence_wait_timeout(struct dma_fence *, bool intr, signed long timeout);
signed long dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count, bool intr, signed long timeout,
                                       uint32_t *idx);

/**
 * dma_fence_wait - sleep until the fence gets signaled
 * @fence: the fence to wait on
 * @intr: if true, do an interruptible wait
 *
 * This function will return -ERESTARTSYS if interrupted by a signal,
 * or 0 if the fence was signaled. Other error values may be
 * returned on custom implementations.
 *
 * Performs a synchronous wait on this fence. It is assumed the caller
 * directly or indirectly holds a reference to the fence, otherwise the
 * fence might be freed before return, resulting in undefined behavior.
 *
 * See also dma_fence_wait_timeout() and dma_fence_wait_any_timeout().
 */
static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr)
{
    signed long ret;

    /* Since dma_fence_wait_timeout cannot timeout with
     * MAX_SCHEDULE_TIMEOUT, only valid return values are
     * -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT.
     */
    ret = dma_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);

    return ret < 0 ? ret : 0;
}
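
/*
 * A sketch of an interruptible wait from process context, propagating the
 * error (typically -ERESTARTSYS) back to the caller so the syscall can be
 * restarted:
 *
 *     long ret;
 *
 *     ret = dma_fence_wait(fence, true);
 *     if (ret)
 *         return ret;
 *     ... fence has signaled, safe to proceed ...
 */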

struct dma_fence *dma_fence_get_stub(void);
u64 dma_fence_context_alloc(unsigned num);
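
/*
 * A hedged sketch of fence creation: a driver allocates one fence context
 * per timeline at init time and then hands out monotonically increasing
 * seqnos on that context. foo_fence_ops, ctx and its lock/seqno members are
 * hypothetical driver state, not part of this header:
 *
 *     ctx->context = dma_fence_context_alloc(1);
 *
 *     fence = kzalloc(sizeof(*fence), GFP_KERNEL);
 *     if (!fence)
 *         return -ENOMEM;
 *     dma_fence_init(fence, &foo_fence_ops, &ctx->lock,
 *                    ctx->context, ++ctx->seqno);
 */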

#define DMA_FENCE_TRACE(f, fmt, args...)                                                                               \
    do {                                                                                                               \
        struct dma_fence *__ff = (f);                                                                                  \
        if (IS_ENABLED(CONFIG_DMA_FENCE_TRACE))                                                                        \
            pr_info("f %llu#%llu: " fmt, __ff->context, __ff->seqno, ##args);                                          \
    } while (0)

#define DMA_FENCE_WARN(f, fmt, args...)                                                                                \
    do {                                                                                                               \
        struct dma_fence *__ff = (f);                                                                                  \
        pr_warn("f %llu#%llu: " fmt, __ff->context, __ff->seqno, ##args);                                              \
    } while (0)

#define DMA_FENCE_ERR(f, fmt, args...)                                                                                 \
    do {                                                                                                               \
        struct dma_fence *__ff = (f);                                                                                  \
        pr_err("f %llu#%llu: " fmt, __ff->context, __ff->seqno, ##args);                                               \
    } while (0)

#endif /* __LINUX_DMA_FENCE_H */