Lines Matching refs:aio
229 * every aio write request started in record__aio_push() so
236 * aio write request may require restart with the
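The two comment fragments above (file lines 229 and 236) appear to come from the completion path: a finished request drops the reference taken when it was started, and a short write is restarted with the remaining bytes. Below is a minimal, self-contained sketch of that complete-or-restart pattern using the POSIX AIO API from <aio.h>; check_and_restart() is a hypothetical name, not the function from the file.

#include <aio.h>
#include <errno.h>
#include <stdio.h>
#include <sys/types.h>

/*
 * Returns 1 when the request has fully completed, 0 when it is still in
 * flight or has been restarted to cover the remaining bytes.
 */
int check_and_restart(struct aiocb *cb)
{
        ssize_t written;
        int err = aio_error(cb);

        if (err == EINPROGRESS)
                return 0;                       /* still running in the kernel */

        written = aio_return(cb);               /* collect the final status */
        if (written < 0)
                written = 0;                    /* treat a failed write as 0 bytes */

        if ((size_t)written == cb->aio_nbytes)
                return 1;                       /* whole chunk written; a refcount put would go here */

        /* Short write: restart the request with the remainder. */
        cb->aio_buf     = (volatile char *)cb->aio_buf + written;
        cb->aio_offset += written;
        cb->aio_nbytes -= written;
        if (aio_write(cb) < 0)
                perror("aio_write restart");
        return 0;
}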
252 struct aiocb **aiocb = md->aio.aiocb;
253 struct aiocb *cblocks = md->aio.cblocks;
259 for (i = 0; i < md->aio.nr_cblocks; ++i) {
267 * Started aio write is not complete yet
278 while (aio_suspend((const struct aiocb **)aiocb, md->aio.nr_cblocks, &timeout)) {
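File lines 252 through 278 above wait on the array of outstanding control blocks with aio_suspend() until one of them finishes. A hedged sketch of that kind of wait loop follows; wait_for_slot() is an invented name and the 1 ms retry timeout is an assumption, not a value taken from the listing.

#include <aio.h>
#include <errno.h>
#include <stdio.h>
#include <time.h>

/* Block until at least one of the pending control blocks completes. */
void wait_for_slot(const struct aiocb *const pending[], int nr)
{
        struct timespec timeout = { 0, 1000 * 1000 };   /* retry every 1 ms (assumed) */

        while (aio_suspend(pending, nr, &timeout)) {
                /* Timeouts and signals are expected; anything else is a real error. */
                if (errno != EAGAIN && errno != EINTR) {
                        perror("aio_suspend");
                        break;
                }
        }
}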
293 struct record_aio *aio = to;
296 * map->core.base data pointed to by buf is copied into a free map->aio.data[] buffer
309 if (record__comp_enabled(aio->rec)) {
310 size = zstd_compress(aio->rec->session, aio->data + aio->size,
311 mmap__mmap_len(map) - aio->size,
314 memcpy(aio->data + aio->size, buf, size);
317 if (!aio->size) {
319 * Increment map->refcount to guard map->aio.data[] buffer
321 * released earlier than aio write request started on
322 * map->aio.data[] buffer is complete.
325 * after started aio request completion or at record__aio_push()
331 aio->size += size;
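File lines 293 through 331 describe the staging step: each chunk read out of the kernel ring buffer is copied (or, when compression is enabled, compressed) into a free map->aio.data[] buffer so the ring space can be released quickly, and a reference is taken on the first chunk so the buffer cannot go away while the asynchronous write is in flight. The following standalone sketch uses made-up names (struct staging, staging_push()) and is not the perf implementation.

#include <string.h>
#include <sys/types.h>

struct staging {
        char   *data;   /* user-space buffer the asynchronous write will use */
        size_t  size;   /* bytes accumulated so far */
        size_t  cap;    /* capacity of data[] */
        int     refs;   /* stand-in for the map refcount discussed above */
};

/* Append one chunk; returns bytes consumed or -1 if the buffer is full. */
ssize_t staging_push(struct staging *st, const void *buf, size_t len)
{
        if (st->size + len > st->cap)
                return -1;

        if (st->size == 0)
                st->refs++;     /* first chunk: pin the buffer until the write completes */

        /* A compression step could replace this plain copy. */
        memcpy(st->data + st->size, buf, len);
        st->size += len;

        return (ssize_t)len;
}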
340 struct record_aio aio = { .rec = rec, .size = 0 };
343 * Call record__aio_sync() to wait till map->aio.data[] buffer
344 * becomes available after previous aio write operation.
348 aio.data = map->aio.data[idx];
349 ret = perf_mmap__push(map, &aio, record__aio_pushfn);
354 ret = record__aio_write(&(map->aio.cblocks[idx]), trace_fd, aio.data, aio.size, *off);
356 *off += aio.size;
357 rec->bytes_written += aio.size;
365 * aio write operation finishes successfully.
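File lines 340 through 365 tie the pieces together: wait for a free buffer, fill it, start the asynchronous write at the current file offset, and on success advance the offset and the bytes-written accounting (the comments above indicate the earlier reference is dropped if the request fails to start). A hedged, self-contained sketch of the start-the-write step with POSIX AIO; start_async_write() and its arguments are illustrative names only.

#include <aio.h>
#include <string.h>
#include <stdio.h>
#include <sys/types.h>

int start_async_write(struct aiocb *cb, int fd, void *data, size_t size, off_t *off)
{
        memset(cb, 0, sizeof(*cb));
        cb->aio_fildes = fd;
        cb->aio_buf    = data;
        cb->aio_nbytes = size;
        cb->aio_offset = *off;

        if (aio_write(cb) < 0) {
                /* Failed to start: the caller would drop the reference taken earlier. */
                perror("aio_write");
                return -1;
        }

        *off += size;           /* the next chunk lands after this one */
        return 0;
}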
2151 if (!strcmp(var, "record.aio")) {
2573 OPT_CALLBACK_OPTARG(0, "aio", &record.opts,
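The last two matches are the configuration hooks: the "record.aio" config key (file line 2151) and the "aio" command-line option (file line 2573) both set the number of control blocks used for asynchronous writing. A minimal sketch of that kind of parsing, with an assumed default of 1 and an invented helper name, might look like this:

#include <stdlib.h>

#define DEFAULT_NR_CBLOCKS 1    /* assumed default; the real value is defined in the source */

int parse_nr_cblocks(const char *value)
{
        long n = value ? strtol(value, NULL, 0) : 0;

        return n > 0 ? (int)n : DEFAULT_NR_CBLOCKS;
}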