/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2020-2023. All rights reserved.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "pthread_impl.h"

/*
 * struct waiter
 *
 * Waiter objects have automatic storage on the waiting thread, and
 * are used in building a linked list representing waiters currently
 * waiting on the condition variable or a group of waiters woken
 * together by a broadcast or signal; in the case of signal, this is a
 * degenerate list of one member.
 *
 * Waiter lists attached to the condition variable itself are
 * protected by the lock on the cv. Detached waiter lists are never
 * modified again, but can only be traversed in reverse order, and are
 * protected by the "barrier" locks in each node, which are unlocked
 * in turn to control wake order.
 *
 * Since process-shared cond var semantics do not necessarily allow
 * one thread to see another's automatic storage (they may be in
 * different processes), the waiter list is not used for the
 * process-shared case, but the structure is still used to store data
 * needed by the cancellation cleanup handler.
 */

struct waiter {
    struct waiter *prev, *next;  /* list links; the head is the newest waiter */
    volatile int state, barrier; /* state: WAITING/SIGNALED/LEAVING; barrier: per-node wake lock */
    volatile int *notify;        /* where a LEAVING waiter reports its removal to a signaler */
};

/* Self-synchronized-destruction-safe lock functions */

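/*
 * These locks use a three-state futex protocol: 0 = unlocked,
 * 1 = locked with no waiters, 2 = locked with waiters. unlock() only
 * issues a futex wake when the old value was 2, so uncontended
 * lock/unlock cycles stay syscall-free.
 */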
static inline void lock(volatile int *l)
{
    if (a_cas(l, 0, 1)) {
        a_cas(l, 1, 2);
        do {
            __wait(l, 0, 2, 1);
        } while (a_cas(l, 0, 2));
    }
}

static inline void unlock(volatile int *l)
{
    if (a_swap(l, 0) == 2) {
        __wake(l, 1, 1);
    }
}

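/*
 * Unlock l and hand its single waiter over to the futex r: wake the
 * waiter directly if w is nonzero, otherwise move it onto r with
 * FUTEX_REQUEUE (falling back to the non-private opcode on kernels
 * where the private variant fails with ENOSYS). Requeueing lets a
 * woken condvar waiter block on the mutex without a spurious wakeup.
 */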
static inline void unlock_requeue(volatile int *l, volatile int *r, int w)
{
    a_store(l, 0);
    if (w) {
        __wake(l, 1, 1);
    } else {
        __syscall(SYS_futex, l, FUTEX_REQUEUE|FUTEX_PRIVATE, 0, 1, r) != -ENOSYS
        || __syscall(SYS_futex, l, FUTEX_REQUEUE, 0, 1, r);
    }
}

enum {
    WAITING,  /* on the list, eligible to be signaled */
    SIGNALED, /* claimed by a signal/broadcast; will be woken via its barrier */
    LEAVING,  /* timed out or cancelled; unlinking itself from the list */
};

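/*
 * Wait on c with m held: enqueue as a waiter (or, for process-shared
 * cvs, bump the sequence-based waiter count), drop the mutex, block
 * on the futex, then reacquire the mutex before returning. ts is an
 * absolute timeout against c's clock, or NULL to wait indefinitely.
 */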
int __pthread_cond_timedwait(pthread_cond_t *restrict c, pthread_mutex_t *restrict m, const struct timespec *restrict ts)
{
    struct waiter node = { 0 };
    int e, seq, clock = c->_c_clock, cs, shared = 0, oldstate, tmp;
    volatile int *fut;

    /* For non-normal mutex types, the caller must hold the mutex. */
    if ((m->_m_type&15) && (m->_m_lock&INT_MAX) != __pthread_self()->tid) {
        return EPERM;
    }

    /* The unsigned comparison also rejects negative tv_nsec. */
    if (ts && ts->tv_nsec >= 1000000000UL) {
        return EINVAL;
    }

    __pthread_testcancel();

    if (c->_c_shared) {
        shared = 1;
        fut = &c->_c_seq;
        seq = c->_c_seq;
        a_inc(&c->_c_waiters);
    } else {
        lock(&c->_c_lock);

        /* The barrier starts out in the locked-with-waiter state (2),
         * so that whoever unlocks it later wakes or requeues this
         * thread; new waiters are pushed at the head of the list. */
        seq = node.barrier = 2;
        fut = &node.barrier;
        node.state = WAITING;
        node.next = c->_c_head;
        c->_c_head = &node;
        if (!c->_c_tail) {
            c->_c_tail = &node;
        } else {
            node.next->prev = &node;
        }

        unlock(&c->_c_lock);
    }

    __pthread_mutex_unlock(m);

    /* In MASKED mode, acting on cancellation is deferred: cancellation
     * during the wait is reported as ECANCELED by __timedwait_cp so
     * that the mutex can be reacquired first. MASKED is not usable
     * when the caller has cancellation disabled. */
    __pthread_setcancelstate(PTHREAD_CANCEL_MASKED, &cs);
    if (cs == PTHREAD_CANCEL_DISABLE) {
        __pthread_setcancelstate(cs, 0);
    }

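    /* Block until woken: keep waiting while the futex word still holds
     * the value observed at enqueue time. A final EINTR is treated as
     * a permitted spurious wake rather than an error. */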
    do {
        e = __timedwait_cp(fut, seq, clock, ts, !shared);
    } while (*fut==seq && (!e || e==EINTR));
    if (e == EINTR) {
        e = 0;
    }

    if (shared) {
        /* Suppress cancellation if a signal was potentially
         * consumed; this is a legitimate form of spurious
         * wake even if not. */
        if (e == ECANCELED && c->_c_seq != seq) {
            e = 0;
        }
        /* If a concurrent pthread_cond_destroy set the high bit of the
         * waiter count, the last waiter out must wake the destroyer. */
        if (a_fetch_add(&c->_c_waiters, -1) == -0x7fffffff) {
            __wake(&c->_c_waiters, 1, 0);
        }
        oldstate = WAITING;
        goto relock;
    }

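    /* Race to leave: if the CAS wins, no signal claimed this waiter
     * and it must unlink itself from the list; if it loses, a signal
     * or broadcast has already detached the node and will unlock its
     * barrier. */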
    oldstate = a_cas(&node.state, WAITING, LEAVING);
    if (oldstate == WAITING) {
        /* Access to cv object is valid because this waiter was not
         * yet signaled and a new signal/broadcast cannot return
         * after seeing a LEAVING waiter without getting notified
         * via the futex notify below. */

        lock(&c->_c_lock);

        if (c->_c_head == &node) {
            c->_c_head = node.next;
        } else if (node.prev) {
            node.prev->next = node.next;
        }
        if (c->_c_tail == &node) {
            c->_c_tail = node.prev;
        } else if (node.next) {
            node.next->prev = node.prev;
        }

        unlock(&c->_c_lock);

        /* A signaler saw this node in the LEAVING state and is
         * waiting for the removal to finish; report it. */
        if (node.notify) {
            if (a_fetch_add(node.notify, -1) == 1) {
                __wake(node.notify, 1, 1);
            }
        }
    } else {
        /* Lock barrier first to control wake order. */
        lock(&node.barrier);
    }

relock:
    /* Errors locking the mutex override any existing error or
     * cancellation, since the caller must see them to know the
     * state of the mutex. */
    if ((tmp = pthread_mutex_lock(m))) {
        e = tmp;
    }

    if (oldstate == WAITING) {
        goto done;
    }

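    /* From here on, this waiter was woken by a signal. The oldest
     * waiter in a woken group (the one with no next pointer)
     * pre-increments the mutex waiter count on behalf of the whole
     * group; each waiter then releases the next one's barrier,
     * requeueing it onto the mutex, and the newest waiter in the
     * group (no prev pointer) drops the extra count. PI mutexes
     * (type bit 8) skip the count, and for PI or process-shared
     * mutexes (bit 128) the barrier is woken instead of requeued. */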
    if (!node.next && !(m->_m_type & 8)) {
        a_inc(&m->_m_waiters);
    }

    /* Unlock the barrier that's holding back the next waiter, and
     * either wake it or requeue it to the mutex. */
    if (node.prev) {
        int val = m->_m_lock;
        if (val > 0) {
            /* Set the contended bit so that unlocking the mutex
             * issues a futex wake for the requeued waiter. */
            a_cas(&m->_m_lock, val, val|0x80000000);
        }
        unlock_requeue(&node.prev->barrier, &m->_m_lock, m->_m_type & (8|128));
    } else if (!(m->_m_type & 8)) {
        a_dec(&m->_m_waiters);
    }

    /* Since a signal was consumed, cancellation is not permitted. */
    if (e == ECANCELED) {
        e = 0;
    }

done:
    __pthread_setcancelstate(cs, 0);

    if (e == ECANCELED) {
        __pthread_testcancel();
        __pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, 0);
    }

    return e;
}

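/*
 * Wake up to n waiters on a process-private cv, detaching the woken
 * group from the cv's list so that later signals cannot consume the
 * same wakes. A negative n never reaches zero in the countdown, so
 * it signals every current waiter.
 */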
int __private_cond_signal(pthread_cond_t *c, int n)
{
    struct waiter *p, *first = 0;
    volatile int ref = 0;
    int cur;
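    /* Fast path (on archs providing the a_ll_p atomic): if the waiter
     * list is observed empty, there is nothing to signal, so the cv
     * lock can be skipped. Note that ref is necessarily still 0 here,
     * so the wait loop copied from the slow path cannot block. */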
    #ifdef a_ll_p
    if (a_ll_p(&c->_c_tail) == NULL) {
        /* Wait for any waiters in the LEAVING state to remove
         * themselves from the list before returning or allowing
         * signaled threads to proceed. */
        while ((cur = ref)) {
            __wait(&ref, 0, cur, 1);
        }

        return 0;
    }
    #endif
    lock(&c->_c_lock);
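    /* Walk from the tail (oldest waiter) toward the head, claiming up
     * to n WAITING nodes. Nodes already in the LEAVING state get a
     * notify pointer so this thread can wait below for their
     * self-removal to complete. */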
    for (p = c->_c_tail; n && p; p = p->prev) {
        if (a_cas(&p->state, WAITING, SIGNALED) != WAITING) {
            ref++;
            p->notify = &ref;
        } else {
            n--;
            if (!first) {
                first = p;
            }
        }
    }
    /* Split the list, leaving any remainder on the cv. */
    if (p) {
        if (p->next) {
            p->next->prev = 0;
        }
        p->next = 0;
    } else {
        c->_c_head = 0;
    }
    c->_c_tail = p;
    unlock(&c->_c_lock);

    /* Wait for any waiters in the LEAVING state to remove
     * themselves from the list before returning or allowing
     * signaled threads to proceed. */
    while ((cur = ref)) {
        __wait(&ref, 0, cur, 1);
    }

    /* Allow first signaled waiter, if any, to proceed. */
    if (first) {
        unlock(&first->barrier);
    }

    return 0;
}
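
/* The public pthread_cond_timedwait is provided via the weak alias
 * below. In musl, pthread_cond_wait (defined elsewhere) forwards here
 * with a NULL timeout, and pthread_cond_signal/pthread_cond_broadcast
 * invoke __private_cond_signal with n==1 and n==-1 respectively for
 * process-private condition variables. */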
weak_alias(__pthread_cond_timedwait, pthread_cond_timedwait);