#include <aio.h>
#include <pthread.h>
#include <semaphore.h>
#include <limits.h>
#include <errno.h>
#include <unistd.h>
#include <stdlib.h>
#include <sys/auxv.h>
#include <unsupported_api.h>
#include "syscall.h"
#include "atomic.h"
#include "pthread_impl.h"
#include "aio_impl.h"

#define malloc __libc_malloc
#define calloc __libc_calloc
#define realloc __libc_realloc
#define free __libc_free

/* The following is a threads-based implementation of AIO with minimal
 * dependence on implementation details. Most synchronization is
 * performed with pthread primitives, but atomics and futex operations
 * are used for notification in a couple of places where the pthread
 * primitives would be inefficient or impractical.
 *
 * For each fd with outstanding aio operations, an aio_queue structure
 * is maintained. These are reference-counted and destroyed by the last
 * aio worker thread to exit. Accessing any member of the aio_queue
 * structure requires a lock on the aio_queue. Adding and removing aio
 * queues themselves requires a write lock on the global map object,
 * a 4-level table mapping file descriptor numbers to aio queues. A
 * read lock on the map is used to obtain locks on existing queues by
 * excluding destruction of the queue by a different thread while it is
 * being locked.
 *
 * Each aio queue has a list of active threads/operations. Presently there
 * is a one-to-one relationship between threads and operations. The only
 * members of the aio_thread structure which are accessed by other threads
 * are the linked list pointers, op (which is immutable), running (which
 * is updated atomically), and err (which is synchronized via running),
 * so no locking is necessary. Most of the other members are used for
 * sharing data between the main flow of execution and the cancellation
 * cleanup handler.
 *
 * Taking any aio locks requires having all signals blocked. This is
 * necessary because aio_cancel is needed by close, and close is required
 * to be async-signal safe. All aio worker threads run with all signals
 * blocked permanently.
 */

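/* Illustrative caller-side usage of the interfaces implemented below
 * (hypothetical snippet, not part of this file's code paths):
 *
 *	struct aiocb cb = { .aio_fildes = fd, .aio_buf = buf,
 *		.aio_nbytes = sizeof buf, .aio_offset = 0 };
 *	if (aio_read(&cb)) handle_error();       // submission failed
 *	while (aio_error(&cb) == EINPROGRESS)
 *		;                                 // or block in aio_suspend()
 *	ssize_t n = aio_return(&cb);              // result of the completed read
 */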
struct aio_thread {
	pthread_t td;
	struct aiocb *cb;
	struct aio_thread *next, *prev;
	struct aio_queue *q;
	volatile int running;
	int err, op;
	ssize_t ret;
};

struct aio_queue {
	int fd, seekable, append, ref, init;
	pthread_mutex_t lock;
	pthread_cond_t cond;
	struct aio_thread *head;
};

struct aio_args {
	struct aiocb *cb;
	struct aio_queue *q;
	int op;
	sem_t sem;
};

static pthread_rwlock_t maplock = PTHREAD_RWLOCK_INITIALIZER;
static struct aio_queue *****map;
static volatile int aio_fd_cnt;
volatile int __aio_fut;

static size_t io_thread_stack_size;

#define MAX(a,b) ((a)>(b) ? (a) : (b))

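/* Look up the queue for fd, creating it (and any missing levels of the
 * map) when need is nonzero. On success the queue is returned with its
 * lock held; otherwise 0 is returned (with errno set to EBADF if fd is
 * not a valid descriptor). */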
static struct aio_queue *__aio_get_queue(int fd, int need)
{
	sigset_t allmask, origmask;
	int masked = 0;
	if (fd < 0) {
		errno = EBADF;
		return 0;
	}
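	/* Split fd into indices for the 4-level map: the top level spans
	 * the high bits of the non-negative int range, the lower levels
	 * have 256 entries each. */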
	int a=fd>>24;
	unsigned char b=fd>>16, c=fd>>8, d=fd;
	struct aio_queue *q = 0;
	pthread_rwlock_rdlock(&maplock);
	if ((!map || !map[a] || !map[a][b] || !map[a][b][c] || !(q=map[a][b][c][d])) && need) {
		pthread_rwlock_unlock(&maplock);
		if (fcntl(fd, F_GETFD) < 0) return 0;
		sigfillset(&allmask);
		masked = 1;
		pthread_sigmask(SIG_BLOCK, &allmask, &origmask);
		pthread_rwlock_wrlock(&maplock);
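		/* Size worker-thread stacks lazily: at least MINSIGSTKSZ plus
		 * some slack, or the kernel-reported AT_MINSIGSTKSZ plus slack
		 * if that is larger. */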
		if (!io_thread_stack_size) {
			unsigned long val = __getauxval(AT_MINSIGSTKSZ);
			io_thread_stack_size = MAX(MINSIGSTKSZ+2048, val+512);
		}
		if (!map) map = calloc(sizeof *map, (-1U/2+1)>>24);
		if (!map) goto out;
		if (!map[a]) map[a] = calloc(sizeof **map, 256);
		if (!map[a]) goto out;
		if (!map[a][b]) map[a][b] = calloc(sizeof ***map, 256);
		if (!map[a][b]) goto out;
		if (!map[a][b][c]) map[a][b][c] = calloc(sizeof ****map, 256);
		if (!map[a][b][c]) goto out;
		if (!(q = map[a][b][c][d])) {
			map[a][b][c][d] = q = calloc(sizeof *****map, 1);
			if (q) {
				q->fd = fd;
				pthread_mutex_init(&q->lock, 0);
				pthread_cond_init(&q->cond, 0);
				a_inc(&aio_fd_cnt);
			}
		}
	}
	if (q) pthread_mutex_lock(&q->lock);
out:
	pthread_rwlock_unlock(&maplock);
	if (masked) pthread_sigmask(SIG_SETMASK, &origmask, 0);
	return q;
}

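/* Drop one reference to q, which must be locked by the caller. The lock
 * is released, and when the last reference is dropped the queue is
 * removed from the map and freed. */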
static void __aio_unref_queue(struct aio_queue *q)
{
	if (q->ref > 1) {
		q->ref--;
		pthread_mutex_unlock(&q->lock);
		return;
	}

	/* This is potentially the last reference, but a new reference
	 * may arrive since we cannot free the queue object without first
	 * taking the maplock, which requires releasing the queue lock. */
	pthread_mutex_unlock(&q->lock);
	pthread_rwlock_wrlock(&maplock);
	pthread_mutex_lock(&q->lock);
	if (q->ref == 1) {
		int fd=q->fd;
		int a=fd>>24;
		unsigned char b=fd>>16, c=fd>>8, d=fd;
		map[a][b][c][d] = 0;
		a_dec(&aio_fd_cnt);
		pthread_rwlock_unlock(&maplock);
		pthread_mutex_unlock(&q->lock);
		free(q);
	} else {
		q->ref--;
		pthread_rwlock_unlock(&maplock);
		pthread_mutex_unlock(&q->lock);
	}
}

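/* Cleanup handler for aio worker threads. It runs on both normal
 * completion and cancellation, publishes the result and error status,
 * unlinks the operation from its queue, and delivers any requested
 * sigevent notification. */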
static void cleanup(void *ctx)
{
	struct aio_thread *at = ctx;
	struct aio_queue *q = at->q;
	struct aiocb *cb = at->cb;
	struct sigevent sev = cb->aio_sigevent;

	/* There are four potential types of waiters we could need to wake:
	 * 1. Callers of aio_cancel/close.
	 * 2. Callers of aio_suspend with a single aiocb.
	 * 3. Callers of aio_suspend with a list.
	 * 4. AIO worker threads waiting for sequenced operations.
	 * Types 1-3 are notified via atomics/futexes, mainly for AS-safety
	 * considerations. Type 4 is notified later via a cond var. */

	cb->__ret = at->ret;
	if (a_swap(&at->running, 0) < 0)
		__wake(&at->running, -1, 1);
	if (a_swap(&cb->__err, at->err) != EINPROGRESS)
		__wake(&cb->__err, -1, 1);
	if (a_swap(&__aio_fut, 0))
		__wake(&__aio_fut, -1, 1);

	pthread_mutex_lock(&q->lock);

	if (at->next) at->next->prev = at->prev;
	if (at->prev) at->prev->next = at->next;
	else q->head = at->next;

	/* Signal aio worker threads waiting for sequenced operations. */
	pthread_cond_broadcast(&q->cond);

	__aio_unref_queue(q);

	if (sev.sigev_notify == SIGEV_SIGNAL) {
		siginfo_t si = {
			.si_signo = sev.sigev_signo,
			.si_value = sev.sigev_value,
			.si_code = SI_ASYNCIO,
			.si_pid = getpid(),
			.si_uid = getuid()
		};
		__syscall(SYS_rt_sigqueueinfo, si.si_pid, si.si_signo, &si);
	}
	if (sev.sigev_notify == SIGEV_THREAD) {
#ifdef FEATURE_PTHREAD_CANCEL
		a_store(&__pthread_self()->cancel, 0);
#endif
		sev.sigev_notify_function(sev.sigev_value);
	}
}

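/* Body of an aio worker thread: register the operation on its queue,
 * wait for any operations it must be sequenced after, perform the I/O,
 * and let the cleanup handler publish the result. */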
static void *io_thread_func(void *ctx)
{
	struct aio_thread at, *p;

	struct aio_args *args = ctx;
	struct aiocb *cb = args->cb;
	int fd = cb->aio_fildes;
	int op = args->op;
	void *buf = (void *)cb->aio_buf;
	size_t len = cb->aio_nbytes;
	off_t off = cb->aio_offset;

	struct aio_queue *q = args->q;
	ssize_t ret;

	pthread_mutex_lock(&q->lock);
	sem_post(&args->sem);

	at.op = op;
	at.running = 1;
	at.ret = -1;
	at.err = ECANCELED;
	at.q = q;
	at.td = __pthread_self();
	at.cb = cb;
	at.prev = 0;
	if ((at.next = q->head)) at.next->prev = &at;
	q->head = &at;

	if (!q->init) {
		int seekable = lseek(fd, 0, SEEK_CUR) >= 0;
		q->seekable = seekable;
		q->append = !seekable || (fcntl(fd, F_GETFL) & O_APPEND);
		q->init = 1;
	}

	pthread_cleanup_push(cleanup, &at);

	/* Wait for sequenced operations. */
	if (op!=LIO_READ && (op!=LIO_WRITE || q->append)) {
		for (;;) {
			for (p=at.next; p && p->op!=LIO_WRITE; p=p->next);
			if (!p) break;
			pthread_cond_wait(&q->cond, &q->lock);
		}
	}

	pthread_mutex_unlock(&q->lock);

	switch (op) {
	case LIO_WRITE:
		ret = q->append ? write(fd, buf, len) : pwrite(fd, buf, len, off);
		break;
	case LIO_READ:
		ret = !q->seekable ? read(fd, buf, len) : pread(fd, buf, len, off);
		break;
	case O_SYNC:
		ret = fsync(fd);
		break;
	case O_DSYNC:
		ret = fdatasync(fd);
		break;
	}
	at.ret = ret;
	at.err = ret<0 ? errno : 0;

	pthread_cleanup_pop(1);

	return 0;
}

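/* Common submission path for aio_read, aio_write, and aio_fsync: take a
 * reference on the fd's queue, spawn a detached worker thread with all
 * signals blocked, and wait for the worker to consume the stack-allocated
 * arguments before returning. */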
static int submit(struct aiocb *cb, int op)
{
	int ret = 0;
	pthread_attr_t a;
	sigset_t allmask, origmask;
	pthread_t td;
	struct aio_queue *q = __aio_get_queue(cb->aio_fildes, 1);
	struct aio_args args = { .cb = cb, .op = op, .q = q };
	sem_init(&args.sem, 0, 0);

	if (!q) {
		if (errno != EBADF) errno = EAGAIN;
		cb->__ret = -1;
		cb->__err = errno;
		return -1;
	}
	q->ref++;
	pthread_mutex_unlock(&q->lock);

	if (cb->aio_sigevent.sigev_notify == SIGEV_THREAD) {
		if (cb->aio_sigevent.sigev_notify_attributes)
			a = *cb->aio_sigevent.sigev_notify_attributes;
		else
			pthread_attr_init(&a);
	} else {
		pthread_attr_init(&a);
		pthread_attr_setstacksize(&a, io_thread_stack_size);
		pthread_attr_setguardsize(&a, 0);
	}
	pthread_attr_setdetachstate(&a, PTHREAD_CREATE_DETACHED);
	sigfillset(&allmask);
	pthread_sigmask(SIG_BLOCK, &allmask, &origmask);
	cb->__err = EINPROGRESS;
	if (pthread_create(&td, &a, io_thread_func, &args)) {
		pthread_mutex_lock(&q->lock);
		__aio_unref_queue(q);
		cb->__err = errno = EAGAIN;
		cb->__ret = ret = -1;
	}
	pthread_sigmask(SIG_SETMASK, &origmask, 0);

	if (!ret) {
		while (sem_wait(&args.sem));
	}

	return ret;
}

int aio_read(struct aiocb *cb)
{
	UNSUPPORTED_API_VOID(LITEOS_A);
	return submit(cb, LIO_READ);
}

int aio_write(struct aiocb *cb)
{
	UNSUPPORTED_API_VOID(LITEOS_A);
	return submit(cb, LIO_WRITE);
}

int aio_fsync(int op, struct aiocb *cb)
{
	UNSUPPORTED_API_VOID(LITEOS_A);
	if (op != O_SYNC && op != O_DSYNC) {
		errno = EINVAL;
		return -1;
	}
	return submit(cb, op);
}

ssize_t aio_return(struct aiocb *cb)
{
	UNSUPPORTED_API_VOID(LITEOS_A);
	return cb->__ret;
}

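/* The high bit of __err doubles as a "waiters present" flag set by
 * aio_suspend, so it is masked off when reporting the error status. */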
int aio_error(const struct aiocb *cb)
{
	UNSUPPORTED_API_VOID(LITEOS_A);
	a_barrier();
	return cb->__err & 0x7fffffff;
}

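/* Cancel all outstanding operations on fd, or just the one matching cb
 * when cb is non-null. All signals are blocked while aio locks are held
 * so that this remains safe to call from close(). */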
int aio_cancel(int fd, struct aiocb *cb)
{
	sigset_t allmask, origmask;
	int ret = AIO_ALLDONE;
	struct aio_thread *p;
	struct aio_queue *q;

	UNSUPPORTED_API_VOID(LITEOS_A);
	/* Unspecified behavior case. Report an error. */
	if (cb && fd != cb->aio_fildes) {
		errno = EINVAL;
		return -1;
	}

	sigfillset(&allmask);
	pthread_sigmask(SIG_BLOCK, &allmask, &origmask);

	errno = ENOENT;
	if (!(q = __aio_get_queue(fd, 0))) {
		if (errno == EBADF) ret = -1;
		goto done;
	}

	for (p = q->head; p; p = p->next) {
		if (cb && cb != p->cb) continue;
		/* Transition target from running to running-with-waiters */
		if (a_cas(&p->running, 1, -1)) {
#ifdef FEATURE_PTHREAD_CANCEL
			pthread_cancel(p->td);
#else
			__syscall(SYS_tkill, p->td->tid, SIGCANCEL);
#endif
			__wait(&p->running, 0, -1, 1);
			if (p->err == ECANCELED) ret = AIO_CANCELED;
		}
	}

	pthread_mutex_unlock(&q->lock);
done:
	pthread_sigmask(SIG_SETMASK, &origmask, 0);
	return ret;
}

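/* Called by close() to cancel any outstanding operations on fd; kept
 * cheap for the common case where aio has never been used. */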
int __aio_close(int fd)
{
	a_barrier();
	if (aio_fd_cnt) aio_cancel(fd, 0);
	return fd;
}

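/* Fork handler: who<0 is the prepare phase (take a read lock on the map),
 * who==0 runs in the parent (release it), and who>0 runs in the child,
 * where all aio state is reset since worker threads do not survive fork. */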
void __aio_atfork(int who)
{
	if (who<0) {
		pthread_rwlock_rdlock(&maplock);
		return;
	} else if (!who) {
		pthread_rwlock_unlock(&maplock);
		return;
	}
	aio_fd_cnt = 0;
	if (pthread_rwlock_tryrdlock(&maplock)) {
		/* Obtaining the lock may fail if _Fork was called not via
		 * fork. In this case, no further aio is possible from the
		 * child and we can just null out map so __aio_close
		 * does not attempt to do anything. */
		map = 0;
		return;
	}
	if (map) for (int a=0; a<(-1U/2+1)>>24; a++)
		if (map[a]) for (int b=0; b<256; b++)
			if (map[a][b]) for (int c=0; c<256; c++)
				if (map[a][b][c]) for (int d=0; d<256; d++)
					map[a][b][c][d] = 0;
	/* Re-initialize the rwlock rather than unlocking since there
	 * may have been more than one reference on it in the parent.
	 * We are not a lock holder anyway; the thread in the parent was. */
	pthread_rwlock_init(&maplock, 0);
}

weak_alias(aio_cancel, aio_cancel64);
weak_alias(aio_error, aio_error64);
weak_alias(aio_fsync, aio_fsync64);
weak_alias(aio_read, aio_read64);
weak_alias(aio_write, aio_write64);
weak_alias(aio_return, aio_return64);