Lines Matching defs:map
238 static int record__write(struct record *rec, struct mmap *map __maybe_unused,
243 if (map && map->file)
244 file = map->file;
251 if (map && map->file) {
273 static size_t zstd_compress(struct perf_session *session, struct mmap *map,
389 static int record__aio_pushfn(struct mmap *map, void *to, void *buf, size_t size)
394 * map->core.base data pointed to by buf is copied into free map->aio.data[] buffer
403 * part of data from map->start till the upper bound and then the remainder
409 mmap__mmap_len(map) - aio->size,
417 * Increment map->refcount to guard map->aio.data[] buffer
418 * from premature deallocation because the map object can be
420 * map->aio.data[] buffer is complete.
426 perf_mmap__get(&map->core);
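The comment run above (lines 394-426) describes record__aio_pushfn(): sampled bytes are copied out of the kernel ring buffer into a free map->aio.data[] slot, in two steps when the chunk crosses the upper bound, and a reference on the map is taken so the buffer outlives it. A minimal sketch of the two-step wrap-around copy, assuming a ring of ring_len bytes; copy_chunk() and its parameters are illustrative, not the perf implementation:

#include <string.h>

/*
 * If the chunk crosses the end of the ring, copy the tail up to the
 * upper bound first, then the remainder from the start of the ring.
 */
static void copy_chunk(unsigned char *dst, const unsigned char *ring,
		       size_t ring_len, size_t start, size_t chunk_len)
{
	if (start + chunk_len > ring_len) {
		size_t first = ring_len - start;	/* up to the upper bound */

		memcpy(dst, ring + start, first);
		memcpy(dst + first, ring, chunk_len - first);	/* wrapped part */
	} else {
		memcpy(dst, ring + start, chunk_len);
	}
}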
434 static int record__aio_push(struct record *rec, struct mmap *map, off_t *off)
441 * Call record__aio_sync() to wait till map->aio.data[] buffer
445 idx = record__aio_sync(map, false);
446 aio.data = map->aio.data[idx];
447 ret = perf_mmap__push(map, &aio, record__aio_pushfn);
452 ret = record__aio_write(&(map->aio.cblocks[idx]), trace_fd, aio.data, aio.size, *off);
460 * Decrement map->refcount incremented in record__aio_pushfn()
462 * map->refcount is decremented in record__aio_complete() after
465 perf_mmap__put(&map->core);
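record__aio_push() above waits for a free aio slot (record__aio_sync()), fills it via perf_mmap__push(), queues the asynchronous write, and drops the reference taken in the push callback if anything fails. A sketch of queuing the write, assuming POSIX aio as the backend; queue_aio_write() is a hypothetical helper, not the perf function:

#include <aio.h>
#include <signal.h>
#include <string.h>
#include <sys/types.h>

/*
 * The caller must keep buf alive (the refcount guard above) until
 * completion is observed via aio_error()/aio_return().
 */
static int queue_aio_write(struct aiocb *cb, int fd, void *buf,
			   size_t size, off_t off)
{
	memset(cb, 0, sizeof(*cb));
	cb->aio_fildes = fd;
	cb->aio_buf = buf;
	cb->aio_nbytes = size;
	cb->aio_offset = off;
	cb->aio_sigevent.sigev_notify = SIGEV_NONE;	/* poll for completion */

	return aio_write(cb);	/* 0 if queued, -1 with errno on error */
}

Completion would then be polled with aio_error() before the slot, and the map reference, are released.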
491 struct mmap *map = &maps[i];
493 if (map->core.base)
494 record__aio_sync(map, true);
521 static int record__aio_push(struct record *rec __maybe_unused, struct mmap *map __maybe_unused,
631 static int record__pushfn(struct mmap *map, void *to, void *bf, size_t size)
636 size = zstd_compress(rec->session, map, map->data, mmap__mmap_len(map), bf, size);
637 bf = map->data;
641 return record__write(rec, map, bf, size);
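record__pushfn() above compresses each chunk into map->data before handing the compressed bytes to record__write(). A sketch of that compress-then-write shape, using zstd's one-shot API purely for illustration (perf itself keeps per-map streaming state in map->zstd_data, as the zstd_compress() fragments later in this listing show):

#include <unistd.h>
#include <zstd.h>

/*
 * Compress src into a caller-provided scratch buffer, then write the
 * compressed bytes. Level 1 mirrors a fast default; illustrative only.
 */
static ssize_t push_compressed(int fd, void *scratch, size_t scratch_len,
			       const void *src, size_t src_len)
{
	size_t n = ZSTD_compress(scratch, scratch_len, src, src_len, 1);

	if (ZSTD_isError(n))
		return -1;

	return write(fd, scratch, n);
}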
698 struct mmap *map,
726 record__write(rec, map, event, event->header.size);
727 record__write(rec, map, data1, len1);
729 record__write(rec, map, data2, len2);
730 record__write(rec, map, &pad, padding);
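The four record__write() calls above emit an event header, up to two data chunks (the aux ring may wrap, hence data1/data2), and trailing zero bytes. A sketch of the padding arithmetic, assuming perf.data records stay 8-byte aligned:

#include <stddef.h>

/* Bytes of zero padding needed after len1 + len2 payload bytes to
 * keep the record 8-byte aligned; 0 when already aligned. */
static size_t aux_padding(size_t len)
{
	return (8 - (len & 7)) & 7;
}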
736 struct mmap *map)
740 ret = auxtrace_mmap__read(map, rec->itr, &rec->tool,
752 struct mmap *map)
756 ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool,
774 struct mmap *map = &rec->evlist->mmap[i];
776 if (!map->auxtrace_mmap.base)
779 if (record__auxtrace_mmap_read_snapshot(rec, map) != 0) {
852 struct mmap *map __maybe_unused)
1044 struct mmap *map, *overwrite_map;
1049 map = thread_data->maps ? thread_data->maps[tm] : NULL;
1056 if ((map && ptr == map) || (overwrite_map && ptr == overwrite_map)) {
1145 pr_err("Failed to map thread and evlist pollfd indexes\n");
1404 * During this process, it'll load kernel map and replace the
1465 static void record__adjust_affinity(struct record *rec, struct mmap *map)
1468 !bitmap_equal(thread->mask->affinity.bits, map->affinity_mask.bits,
1472 map->affinity_mask.bits, thread->mask->affinity.nbits);
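record__adjust_affinity() above compares the running thread's affinity bitmap with the map's and copies the bits over when they differ. A minimal sketch of the same idea using glibc's cpu_set_t; perf uses its own bitmap helpers, so adjust_affinity() here is only an analogue:

#define _GNU_SOURCE
#include <sched.h>

/* Switch the calling thread's CPU affinity when the map prefers a
 * different CPU set than the one currently in effect. */
static void adjust_affinity(cpu_set_t *cur, const cpu_set_t *want)
{
	if (!CPU_EQUAL(cur, want)) {
		*cur = *want;
		sched_setaffinity(0, sizeof(*cur), cur);	/* pid 0: caller */
	}
}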
1498 static size_t zstd_compress(struct perf_session *session, struct mmap *map,
1505 if (map && map->file)
1506 zstd_data = &map->zstd_data;
1511 if (map && map->file) {
1550 struct mmap *map = maps[i];
1552 if (map->core.base) {
1553 record__adjust_affinity(rec, map);
1555 flush = map->core.flush;
1556 map->core.flush = 1;
1559 if (perf_mmap__push(map, rec, record__pushfn) < 0) {
1561 map->core.flush = flush;
1566 if (record__aio_push(rec, map, &off) < 0) {
1569 map->core.flush = flush;
1575 map->core.flush = flush;
1578 if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
1580 record__auxtrace_mmap_read(rec, map) != 0) {
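The loop fragments above (lines 1550-1580) save the configured flush threshold, force it to 1 so even a nearly empty buffer gets pushed when draining, push through either the synchronous or the aio path, and restore the threshold afterwards. A self-contained sketch of that save/force/restore pattern; the struct stub stands in for the perf-internal type, and drain_one() is hypothetical:

/* Stand-in stub so the sketch compiles on its own; the real
 * definitions live in tools/perf and tools/lib/perf. */
struct ring { int flush; };

static int drain_one(struct ring *m, int (*push)(struct ring *m))
{
	int saved = m->flush;
	int rc;

	m->flush = 1;		/* push even a single pending byte */
	rc = push(m);
	m->flush = saved;	/* restore the configured threshold */

	return rc;
}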
1620 struct perf_mmap *map = fda->priv[fd].ptr;
1622 if (map)
1623 perf_mmap__put(map);
1847 * contain map and comm information.
2044 pr_err("Couldn't synthesize thread map.\n");
2051 pr_err("Couldn't synthesize cpu map.\n");
3437 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
3461 "Record build-id in map events"),