#include <aio.h>
#include <pthread.h>
#include <semaphore.h>
#include <limits.h>
#include <errno.h>
#include <unistd.h>
#include <stdlib.h>
#include <sys/auxv.h>
#include <unsupported_api.h>
#include "syscall.h"
#include "atomic.h"
#include "pthread_impl.h"
#include "aio_impl.h"

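/* Route allocations below through libc's internal allocator entry points,
 * presumably so that aio never depends on a malloc implementation
 * interposed by the application. */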
#define malloc __libc_malloc
#define calloc __libc_calloc
#define realloc __libc_realloc
#define free __libc_free

/* The following is a threads-based implementation of AIO with minimal
 * dependence on implementation details. Most synchronization is
 * performed with pthread primitives, but atomics and futex operations
 * are used for notification in a couple places where the pthread
 * primitives would be inefficient or impractical.
 *
 * For each fd with outstanding aio operations, an aio_queue structure
 * is maintained. These are reference-counted and destroyed by the last
 * aio worker thread to exit. Accessing any member of the aio_queue
 * structure requires a lock on the aio_queue. Adding and removing aio
 * queues themselves requires a write lock on the global map object,
 * a 4-level table mapping file descriptor numbers to aio queues. A
 * read lock on the map is used to obtain locks on existing queues by
 * excluding destruction of the queue by a different thread while it is
 * being locked.
 *
 * Each aio queue has a list of active threads/operations. Presently there
 * is a one to one relationship between threads and operations. The only
 * members of the aio_thread structure which are accessed by other threads
 * are the linked list pointers, op (which is immutable), running (which
 * is updated atomically), and err (which is synchronized via running),
 * so no locking is necessary. Most of the other members are used for
 * sharing data between the main flow of execution and the cancellation
 * cleanup handler.
 *
 * Taking any aio locks requires having all signals blocked. This is
 * necessary because aio_cancel is needed by close, and close is required
 * to be async-signal safe. All aio worker threads run with all signals
 * blocked permanently.
 */

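/* Each in-flight operation is represented by an aio_thread record that
 * lives on the worker thread's own stack. The running field doubles as a
 * small state machine: 1 means the operation is in progress, 0 means it
 * has finished (cleanup() has published ret/err), and -1 means it is
 * still in progress but a canceller is waiting on the futex for it to
 * finish. */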
struct aio_thread {
	pthread_t td;
	struct aiocb *cb;
	struct aio_thread *next, *prev;
	struct aio_queue *q;
	volatile int running;
	int err, op;
	ssize_t ret;
};

struct aio_queue {
	int fd, seekable, append, ref, init;
	pthread_mutex_t lock;
	pthread_cond_t cond;
	struct aio_thread *head;
};

struct aio_args {
	struct aiocb *cb;
	struct aio_queue *q;
	int op;
	sem_t sem;
};

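/* Global state: maplock guards the 4-level fd-to-queue map below;
 * aio_fd_cnt counts file descriptors that currently have a queue, so
 * __aio_close can skip the cancellation pass when there is no aio
 * activity; and __aio_fut is the futex that callers of aio_suspend
 * waiting on a list of requests sleep on (cleanup() wakes it after each
 * completed operation). */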
static pthread_rwlock_t maplock = PTHREAD_RWLOCK_INITIALIZER;
static struct aio_queue *****map;
static volatile int aio_fd_cnt;
volatile int __aio_fut;

static size_t io_thread_stack_size;

#define MAX(a,b) ((a)>(b) ? (a) : (b))

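/* Look up (and, if need is set, lazily create) the queue for fd. The fd
 * is split into one index per map level: a = fd>>24 and b, c, d are the
 * next three bytes, so e.g. fd 5 lands at map[0][0][0][5]. Lookup runs
 * under a read lock; allocating missing levels retakes the lock in write
 * mode with all signals blocked. On success the queue is returned with
 * q->lock already held. */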
static struct aio_queue *__aio_get_queue(int fd, int need)
{
	sigset_t allmask, origmask;
	int masked = 0;
	if (fd < 0) {
		errno = EBADF;
		return 0;
	}
	int a=fd>>24;
	unsigned char b=fd>>16, c=fd>>8, d=fd;
	struct aio_queue *q = 0;
	pthread_rwlock_rdlock(&maplock);
	if ((!map || !map[a] || !map[a][b] || !map[a][b][c] || !(q=map[a][b][c][d])) && need) {
		pthread_rwlock_unlock(&maplock);
		if (fcntl(fd, F_GETFD) < 0) return 0;
		sigfillset(&allmask);
		masked = 1;
		pthread_sigmask(SIG_BLOCK, &allmask, &origmask);
		pthread_rwlock_wrlock(&maplock);
		if (!io_thread_stack_size) {
			unsigned long val = __getauxval(AT_MINSIGSTKSZ);
			io_thread_stack_size = MAX(MINSIGSTKSZ+2048, val+512);
		}
		if (!map) map = calloc(sizeof *map, (-1U/2+1)>>24);
		if (!map) goto out;
		if (!map[a]) map[a] = calloc(sizeof **map, 256);
		if (!map[a]) goto out;
		if (!map[a][b]) map[a][b] = calloc(sizeof ***map, 256);
		if (!map[a][b]) goto out;
		if (!map[a][b][c]) map[a][b][c] = calloc(sizeof ****map, 256);
		if (!map[a][b][c]) goto out;
		if (!(q = map[a][b][c][d])) {
			map[a][b][c][d] = q = calloc(sizeof *****map, 1);
			if (q) {
				q->fd = fd;
				pthread_mutex_init(&q->lock, 0);
				pthread_cond_init(&q->cond, 0);
				a_inc(&aio_fd_cnt);
			}
		}
	}
	if (q) pthread_mutex_lock(&q->lock);
out:
	pthread_rwlock_unlock(&maplock);
	if (masked) pthread_sigmask(SIG_SETMASK, &origmask, 0);
	return q;
}

static void __aio_unref_queue(struct aio_queue *q)
{
	if (q->ref > 1) {
		q->ref--;
		pthread_mutex_unlock(&q->lock);
		return;
	}

	/* This is potentially the last reference, but a new reference
	 * may arrive since we cannot free the queue object without first
	 * taking the maplock, which requires releasing the queue lock. */
	pthread_mutex_unlock(&q->lock);
	pthread_rwlock_wrlock(&maplock);
	pthread_mutex_lock(&q->lock);
	if (q->ref == 1) {
		int fd=q->fd;
		int a=fd>>24;
		unsigned char b=fd>>16, c=fd>>8, d=fd;
		map[a][b][c][d] = 0;
		a_dec(&aio_fd_cnt);
		pthread_rwlock_unlock(&maplock);
		pthread_mutex_unlock(&q->lock);
		free(q);
	} else {
		q->ref--;
		pthread_rwlock_unlock(&maplock);
		pthread_mutex_unlock(&q->lock);
	}
}

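/* cleanup() is installed as a cancellation cleanup handler, so it runs
 * on the worker thread both when the operation completes normally and
 * when the worker is cancelled (in which case at->err still holds its
 * initial ECANCELED value). It publishes the result, wakes every class
 * of waiter, unlinks the record from its queue, and finally delivers any
 * requested sigevent notification. */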
static void cleanup(void *ctx)
{
	struct aio_thread *at = ctx;
	struct aio_queue *q = at->q;
	struct aiocb *cb = at->cb;
	struct sigevent sev = cb->aio_sigevent;

	/* There are four potential types of waiters we could need to wake:
	 *   1. Callers of aio_cancel/close.
	 *   2. Callers of aio_suspend with a single aiocb.
	 *   3. Callers of aio_suspend with a list.
	 *   4. AIO worker threads waiting for sequenced operations.
	 * Types 1-3 are notified via atomics/futexes, mainly for AS-safety
	 * considerations. Type 4 is notified later via a cond var. */

	cb->__ret = at->ret;
	if (a_swap(&at->running, 0) < 0)
		__wake(&at->running, -1, 1);
	if (a_swap(&cb->__err, at->err) != EINPROGRESS)
		__wake(&cb->__err, -1, 1);
	if (a_swap(&__aio_fut, 0))
		__wake(&__aio_fut, -1, 1);

	pthread_mutex_lock(&q->lock);

	if (at->next) at->next->prev = at->prev;
	if (at->prev) at->prev->next = at->next;
	else q->head = at->next;

	/* Signal aio worker threads waiting for sequenced operations. */
	pthread_cond_broadcast(&q->cond);

	__aio_unref_queue(q);

	if (sev.sigev_notify == SIGEV_SIGNAL) {
		siginfo_t si = {
			.si_signo = sev.sigev_signo,
			.si_value = sev.sigev_value,
			.si_code = SI_ASYNCIO,
			.si_pid = getpid(),
			.si_uid = getuid()
		};
		__syscall(SYS_rt_sigqueueinfo, si.si_pid, si.si_signo, &si);
	}
	if (sev.sigev_notify == SIGEV_THREAD) {
#ifdef FEATURE_PTHREAD_CANCEL
		a_store(&__pthread_self()->cancel, 0);
#endif
		sev.sigev_notify_function(sev.sigev_value);
	}
}

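/* Body of each detached worker thread. The aio_args block lives on the
 * submitting thread's stack, so all of its fields are copied out before
 * args->sem is posted. Posting the semaphore only after q->lock has been
 * acquired also means that, by the time submit() returns, any later
 * aio_cancel or close must wait for this record to be fully linked into
 * the queue before it can scan it. */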
static void *io_thread_func(void *ctx)
{
	struct aio_thread at, *p;

	struct aio_args *args = ctx;
	struct aiocb *cb = args->cb;
	int fd = cb->aio_fildes;
	int op = args->op;
	void *buf = (void *)cb->aio_buf;
	size_t len = cb->aio_nbytes;
	off_t off = cb->aio_offset;

	struct aio_queue *q = args->q;
	ssize_t ret;

	pthread_mutex_lock(&q->lock);
	sem_post(&args->sem);

	at.op = op;
	at.running = 1;
	at.ret = -1;
	at.err = ECANCELED;
	at.q = q;
	at.td = __pthread_self();
	at.cb = cb;
	at.prev = 0;
	if ((at.next = q->head)) at.next->prev = &at;
	q->head = &at;

	if (!q->init) {
		int seekable = lseek(fd, 0, SEEK_CUR) >= 0;
		q->seekable = seekable;
		q->append = !seekable || (fcntl(fd, F_GETFL) & O_APPEND);
		q->init = 1;
	}

	pthread_cleanup_push(cleanup, &at);

	/* Wait for sequenced operations. */
	if (op!=LIO_READ && (op!=LIO_WRITE || q->append)) {
		for (;;) {
			for (p=at.next; p && p->op!=LIO_WRITE; p=p->next);
			if (!p) break;
			pthread_cond_wait(&q->cond, &q->lock);
		}
	}

	pthread_mutex_unlock(&q->lock);

	switch (op) {
	case LIO_WRITE:
		ret = q->append ? write(fd, buf, len) : pwrite(fd, buf, len, off);
		break;
	case LIO_READ:
		ret = !q->seekable ? read(fd, buf, len) : pread(fd, buf, len, off);
		break;
	case O_SYNC:
		ret = fsync(fd);
		break;
	case O_DSYNC:
		ret = fdatasync(fd);
		break;
	}
	at.ret = ret;
	at.err = ret<0 ? errno : 0;

	pthread_cleanup_pop(1);

	return 0;
}

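/* Common submission path for aio_read, aio_write and aio_fsync: take a
 * reference on the fd's queue, spawn a detached worker thread with all
 * signals blocked during creation, and wait on args.sem so the
 * stack-allocated args remains valid until the worker has consumed it. */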
static int submit(struct aiocb *cb, int op)
{
	int ret = 0;
	pthread_attr_t a;
	sigset_t allmask, origmask;
	pthread_t td;
	struct aio_queue *q = __aio_get_queue(cb->aio_fildes, 1);
	struct aio_args args = { .cb = cb, .op = op, .q = q };
	sem_init(&args.sem, 0, 0);

	if (!q) {
		if (errno != EBADF) errno = EAGAIN;
		cb->__ret = -1;
		cb->__err = errno;
		return -1;
	}
	q->ref++;
	pthread_mutex_unlock(&q->lock);

	if (cb->aio_sigevent.sigev_notify == SIGEV_THREAD) {
		if (cb->aio_sigevent.sigev_notify_attributes)
			a = *cb->aio_sigevent.sigev_notify_attributes;
		else
			pthread_attr_init(&a);
	} else {
		pthread_attr_init(&a);
		pthread_attr_setstacksize(&a, io_thread_stack_size);
		pthread_attr_setguardsize(&a, 0);
	}
	pthread_attr_setdetachstate(&a, PTHREAD_CREATE_DETACHED);
	sigfillset(&allmask);
	pthread_sigmask(SIG_BLOCK, &allmask, &origmask);
	cb->__err = EINPROGRESS;
	if (pthread_create(&td, &a, io_thread_func, &args)) {
		pthread_mutex_lock(&q->lock);
		__aio_unref_queue(q);
		cb->__err = errno = EAGAIN;
		cb->__ret = ret = -1;
	}
	pthread_sigmask(SIG_SETMASK, &origmask, 0);

	if (!ret) {
		while (sem_wait(&args.sem));
	}

	return ret;
}

int aio_read(struct aiocb *cb)
{
	UNSUPPORTED_API_VOID(LITEOS_A);
	return submit(cb, LIO_READ);
}

int aio_write(struct aiocb *cb)
{
	UNSUPPORTED_API_VOID(LITEOS_A);
	return submit(cb, LIO_WRITE);
}

int aio_fsync(int op, struct aiocb *cb)
{
	UNSUPPORTED_API_VOID(LITEOS_A);
	if (op != O_SYNC && op != O_DSYNC) {
		errno = EINVAL;
		return -1;
	}
	return submit(cb, op);
}

ssize_t aio_return(struct aiocb *cb)
{
	UNSUPPORTED_API_VOID(LITEOS_A);
	return cb->__ret;
}

int aio_error(const struct aiocb *cb)
{
	UNSUPPORTED_API_VOID(LITEOS_A);
	a_barrier();
	return cb->__err & 0x7fffffff;
}

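/* Cancellation: for each matching in-flight operation, move running from
 * 1 to -1 (running with a waiter present), cancel the worker thread,
 * then sleep on the running futex until cleanup() stores 0. If the
 * operation's error ends up as ECANCELED it was cancelled; otherwise its
 * I/O had already completed. */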
int aio_cancel(int fd, struct aiocb *cb)
{
	sigset_t allmask, origmask;
	int ret = AIO_ALLDONE;
	struct aio_thread *p;
	struct aio_queue *q;

	UNSUPPORTED_API_VOID(LITEOS_A);
	/* Unspecified behavior case. Report an error. */
	if (cb && fd != cb->aio_fildes) {
		errno = EINVAL;
		return -1;
	}

	sigfillset(&allmask);
	pthread_sigmask(SIG_BLOCK, &allmask, &origmask);

	errno = ENOENT;
	if (!(q = __aio_get_queue(fd, 0))) {
		if (errno == EBADF) ret = -1;
		goto done;
	}

	for (p = q->head; p; p = p->next) {
		if (cb && cb != p->cb) continue;
		/* Transition target from running to running-with-waiters */
		if (a_cas(&p->running, 1, -1)) {
#ifdef FEATURE_PTHREAD_CANCEL
			pthread_cancel(p->td);
#else
			__syscall(SYS_tkill, p->td->tid, SIGCANCEL);
#endif
			__wait(&p->running, 0, -1, 1);
			if (p->err == ECANCELED) ret = AIO_CANCELED;
		}
	}

	pthread_mutex_unlock(&q->lock);
done:
	pthread_sigmask(SIG_SETMASK, &origmask, 0);
	return ret;
}

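/* Called from close(). close() must remain async-signal-safe, which is
 * why every aio lock is only ever taken with all signals blocked; the
 * aio_fd_cnt check lets processes that never use aio skip the
 * cancellation pass entirely. */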
int __aio_close(int fd)
{
	a_barrier();
	if (aio_fd_cnt) aio_cancel(fd, 0);
	return fd;
}

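/* Fork hook: who<0 runs in the parent before fork and takes a read lock
 * on the map, who==0 runs in the parent afterwards and releases it, and
 * who>0 runs in the child, where all worker threads are gone, so the map
 * entries are dropped and the rwlock is reinitialized. */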
void __aio_atfork(int who)
{
	if (who<0) {
		pthread_rwlock_rdlock(&maplock);
		return;
	} else if (!who) {
		pthread_rwlock_unlock(&maplock);
		return;
	}
	aio_fd_cnt = 0;
	if (pthread_rwlock_tryrdlock(&maplock)) {
		/* Obtaining the lock may fail if _Fork was called not via
		 * fork. In this case, no further aio is possible from the
		 * child and we can just null out map so __aio_close
		 * does not attempt to do anything. */
		map = 0;
		return;
	}
	if (map) for (int a=0; a<(-1U/2+1)>>24; a++)
		if (map[a]) for (int b=0; b<256; b++)
			if (map[a][b]) for (int c=0; c<256; c++)
				if (map[a][b][c]) for (int d=0; d<256; d++)
					map[a][b][c][d] = 0;
	/* Re-initialize the rwlock rather than unlocking since there
	 * may have been more than one reference on it in the parent.
	 * We are not a lock holder anyway; the thread in the parent was. */
	pthread_rwlock_init(&maplock, 0);
}

weak_alias(aio_cancel, aio_cancel64);
weak_alias(aio_error, aio_error64);
weak_alias(aio_fsync, aio_fsync64);
weak_alias(aio_read, aio_read64);
weak_alias(aio_write, aio_write64);
weak_alias(aio_return, aio_return64);