Lines matching references to the symbol ff
91 static int __do_write_fd(struct feat_fd *ff, const void *buf, size_t size)
93 ssize_t ret = writen(ff->fd, buf, size);
100 static int __do_write_buf(struct feat_fd *ff, const void *buf, size_t size)
104 size_t new_size = ff->size;
107 if (size + ff->offset > max_size)
110 while (size > (new_size - ff->offset))
114 if (ff->size < new_size) {
115 addr = realloc(ff->buf, new_size);
118 ff->buf = addr;
119 ff->size = new_size;
122 memcpy(ff->buf + ff->offset, buf, size);
123 ff->offset += size;
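
The lines above are the two low-level write paths: __do_write_fd() pushes bytes straight to the file descriptor with writen(), while __do_write_buf() appends to an in-memory buffer (pipe mode), growing it by doubling before the memcpy at the current offset. The following is a minimal self-contained sketch of the buffered path only; the struct, the seed size of 4096, and the error values are simplified stand-ins, and the real code additionally rejects writes that would exceed the maximum event size (the max_size check above).

#include <stdlib.h>
#include <string.h>

/* Simplified stand-in for the buffer-backed part of struct feat_fd. */
struct feat_buf {
	void	*buf;		/* in-memory destination (pipe mode) */
	size_t	size;		/* bytes currently allocated         */
	size_t	offset;		/* next write position               */
};

static int buf_write(struct feat_buf *ff, const void *buf, size_t size)
{
	size_t new_size = ff->size ? ff->size : 4096;	/* assumed seed size */
	void *addr;

	/* grow by doubling until the payload fits past the current offset */
	while (size > new_size - ff->offset)
		new_size <<= 1;

	if (ff->size < new_size) {
		addr = realloc(ff->buf, new_size);
		if (!addr)
			return -1;
		ff->buf = addr;
		ff->size = new_size;
	}

	memcpy((char *)ff->buf + ff->offset, buf, size);
	ff->offset += size;
	return 0;
}
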
129 int do_write(struct feat_fd *ff, const void *buf, size_t size)
131 if (!ff->buf)
132 return __do_write_fd(ff, buf, size);
133 return __do_write_buf(ff, buf, size);
137 static int do_write_bitmap(struct feat_fd *ff, unsigned long *set, u64 size)
142 ret = do_write(ff, &size, sizeof(size));
147 ret = do_write(ff, p + i, sizeof(*p));
156 int write_padded(struct feat_fd *ff, const void *bf,
160 int err = do_write(ff, bf, count);
163 err = do_write(ff, zero_buf, count_aligned - count);
172 static int do_write_string(struct feat_fd *ff, const char *str)
181 ret = do_write(ff, &len, sizeof(len));
185 return write_padded(ff, str, olen, len);
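
write_padded() and do_write_string() define how strings are framed in the header: a u32 length (the string including its NUL, rounded up to an alignment boundary), then the string, then zero padding up to that length. Below is a self-contained sketch of that framing into a caller-provided buffer; the 64-byte alignment is an assumption standing in for the constant used in header.c.

#include <stdint.h>
#include <string.h>

#define SKETCH_ALIGN 64	/* assumed value; the real constant lives in header.c */

/* Frame one string the way do_write_string()/write_padded() do: a u32
 * padded length (string plus NUL, rounded up to the alignment), then the
 * string itself, then zero padding. dst must have room for 4 + len bytes. */
static size_t frame_string(unsigned char *dst, const char *str)
{
	uint32_t olen = (uint32_t)strlen(str) + 1;	/* incl. NUL */
	uint32_t len  = (olen + SKETCH_ALIGN - 1) & ~(uint32_t)(SKETCH_ALIGN - 1);

	memcpy(dst, &len, sizeof(len));
	memcpy(dst + sizeof(len), str, olen);
	memset(dst + sizeof(len) + olen, 0, len - olen);

	return sizeof(len) + len;	/* bytes consumed in dst */
}
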
188 static int __do_read_fd(struct feat_fd *ff, void *addr, ssize_t size)
190 ssize_t ret = readn(ff->fd, addr, size);
197 static int __do_read_buf(struct feat_fd *ff, void *addr, ssize_t size)
199 if (size > (ssize_t)ff->size - ff->offset)
202 memcpy(addr, ff->buf + ff->offset, size);
203 ff->offset += size;
209 static int __do_read(struct feat_fd *ff, void *addr, ssize_t size)
211 if (!ff->buf)
212 return __do_read_fd(ff, addr, size);
213 return __do_read_buf(ff, addr, size);
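
__do_read_buf() is the mirror image of the buffered write path: in pipe mode a read is a bounds check against the remaining buffer followed by a memcpy, and __do_read() picks between it and the fd path exactly as do_write() does (a NULL ff->buf means "use the file descriptor"). A self-contained sketch of the buffered read, with a simplified struct and error value:

#include <string.h>

/* Simplified buffered read: fail if the request runs past the end of the
 * in-memory header data, otherwise copy and advance the cursor. */
struct read_buf {
	const unsigned char *buf;
	size_t size;
	size_t offset;
};

static int buf_read(struct read_buf *ff, void *addr, size_t size)
{
	if (size > ff->size - ff->offset)
		return -1;

	memcpy(addr, ff->buf + ff->offset, size);
	ff->offset += size;
	return 0;
}
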
216 static int do_read_u32(struct feat_fd *ff, u32 *addr)
220 ret = __do_read(ff, addr, sizeof(*addr));
224 if (ff->ph->needs_swap)
229 static int do_read_u64(struct feat_fd *ff, u64 *addr)
233 ret = __do_read(ff, addr, sizeof(*addr));
237 if (ff->ph->needs_swap)
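
do_read_u32()/do_read_u64() are where fixed-width header fields get byte-swapped: the raw value is read first, then swapped when ff->ph->needs_swap indicates the file was produced on a host of the opposite endianness. A tiny self-contained illustration of that second step (helper name is invented):

#include <byteswap.h>
#include <stdbool.h>
#include <stdint.h>

/* Swap a freshly read header field only when the producing host had the
 * opposite byte order (the needs_swap flag in the real code). */
static uint64_t fixup_u64(uint64_t raw, bool needs_swap)
{
	return needs_swap ? bswap_64(raw) : raw;
}
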
242 static char *do_read_string(struct feat_fd *ff)
247 if (do_read_u32(ff, &len))
254 if (!__do_read(ff, buf, len)) {
268 static int do_read_bitmap(struct feat_fd *ff, unsigned long **pset, u64 *psize)
274 ret = do_read_u64(ff, &size);
285 ret = do_read_u64(ff, p + i);
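
do_write_bitmap()/do_read_bitmap() serialize a bitmap as a u64 bit count followed by the bitmap words, one u64 per 64 bits; the memory-topology feature uses this for its node CPU masks (line 1399 below). A sketch of the write side into a caller-provided buffer, assuming 64-bit words:

#include <stdint.h>
#include <string.h>

/* Serialize a bitmap the way do_write_bitmap() does: a u64 bit count,
 * then one u64 word per 64 bits. Assumes dst is large enough. */
static size_t frame_bitmap(unsigned char *dst, const uint64_t *words, uint64_t nbits)
{
	size_t off = 0;
	uint64_t i;

	memcpy(dst, &nbits, sizeof(nbits));
	off += sizeof(nbits);

	for (i = 0; i * 64 < nbits; i++) {
		memcpy(dst + off, &words[i], sizeof(words[i]));
		off += sizeof(words[i]);
	}

	return off;
}
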
297 static int write_tracing_data(struct feat_fd *ff,
300 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
303 return read_tracing_data(ff->fd, &evlist->core.entries);
306 static int write_build_id(struct feat_fd *ff,
312 session = container_of(ff->ph, struct perf_session, header);
317 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
320 err = perf_session__write_buildid_table(session, ff);
330 static int write_hostname(struct feat_fd *ff,
340 return do_write_string(ff, uts.nodename);
343 static int write_osrelease(struct feat_fd *ff,
353 return do_write_string(ff, uts.release);
356 static int write_arch(struct feat_fd *ff,
366 return do_write_string(ff, uts.machine);
369 static int write_version(struct feat_fd *ff,
372 return do_write_string(ff, perf_version_string);
375 static int __write_cpudesc(struct feat_fd *ff, const char *cpuinfo_proc)
423 ret = do_write_string(ff, s);
430 static int write_cpudesc(struct feat_fd *ff,
456 ret = __write_cpudesc(ff, cpuinfo_procs[i]);
464 static int write_nrcpus(struct feat_fd *ff,
479 ret = do_write(ff, &nrc, sizeof(nrc));
483 return do_write(ff, &nra, sizeof(nra));
486 static int write_event_desc(struct feat_fd *ff,
498 ret = do_write(ff, &nre, sizeof(nre));
506 ret = do_write(ff, &sz, sizeof(sz));
511 ret = do_write(ff, &evsel->core.attr, sz);
522 ret = do_write(ff, &nri, sizeof(nri));
529 ret = do_write_string(ff, evsel__name(evsel));
535 ret = do_write(ff, evsel->core.id, evsel->core.ids * sizeof(u64));
542 static int write_cmdline(struct feat_fd *ff,
554 ret = do_write(ff, &n, sizeof(n));
558 ret = do_write_string(ff, buf);
563 ret = do_write_string(ff, perf_env.cmdline_argv[i]);
571 static int write_cpu_topology(struct feat_fd *ff,
582 ret = do_write(ff, &tp->core_sib, sizeof(tp->core_sib));
587 ret = do_write_string(ff, tp->core_siblings[i]);
591 ret = do_write(ff, &tp->thread_sib, sizeof(tp->thread_sib));
596 ret = do_write_string(ff, tp->thread_siblings[i]);
606 ret = do_write(ff, &perf_env.cpu[j].core_id,
610 ret = do_write(ff, &perf_env.cpu[j].socket_id,
619 ret = do_write(ff, &tp->die_sib, sizeof(tp->die_sib));
624 ret = do_write_string(ff, tp->die_siblings[i]);
630 ret = do_write(ff, &perf_env.cpu[j].die_id,
643 static int write_total_mem(struct feat_fd *ff,
664 ret = do_write(ff, &mem, sizeof(mem));
672 static int write_numa_topology(struct feat_fd *ff,
683 ret = do_write(ff, &tp->nr, sizeof(u32));
690 ret = do_write(ff, &n->node, sizeof(u32));
694 ret = do_write(ff, &n->mem_total, sizeof(u64));
698 ret = do_write(ff, &n->mem_free, sizeof(u64));
702 ret = do_write_string(ff, n->cpus);
726 static int write_pmu_mappings(struct feat_fd *ff,
743 ret = do_write(ff, &pmu_num, sizeof(pmu_num));
751 ret = do_write(ff, &pmu->type, sizeof(pmu->type));
755 ret = do_write_string(ff, pmu->name);
775 static int write_group_desc(struct feat_fd *ff,
782 ret = do_write(ff, &nr_groups, sizeof(nr_groups));
792 ret = do_write_string(ff, name);
796 ret = do_write(ff, &leader_idx, sizeof(leader_idx));
800 ret = do_write(ff, &nr_members, sizeof(nr_members));
856 static int write_cpuid(struct feat_fd *ff,
866 return do_write_string(ff, buffer);
869 static int write_branch_stack(struct feat_fd *ff __maybe_unused,
875 static int write_auxtrace(struct feat_fd *ff,
881 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
884 session = container_of(ff->ph, struct perf_session, header);
886 err = auxtrace_index__write(ff->fd, &session->auxtrace_index);
892 static int write_clockid(struct feat_fd *ff,
895 return do_write(ff, &ff->ph->env.clock.clockid_res_ns,
896 sizeof(ff->ph->env.clock.clockid_res_ns));
899 static int write_clock_data(struct feat_fd *ff,
909 ret = do_write(ff, &data32, sizeof(data32));
914 data32 = ff->ph->env.clock.clockid;
916 ret = do_write(ff, &data32, sizeof(data32));
921 data64 = &ff->ph->env.clock.tod_ns;
923 ret = do_write(ff, data64, sizeof(*data64));
928 data64 = &ff->ph->env.clock.clockid_ns;
930 return do_write(ff, data64, sizeof(*data64));
933 static int write_dir_format(struct feat_fd *ff,
939 session = container_of(ff->ph, struct perf_session, header);
945 return do_write(ff, &data->dir.version, sizeof(data->dir.version));
949 static int write_bpf_prog_info(struct feat_fd *ff,
952 struct perf_env *env = &ff->ph->env;
959 ret = do_write(ff, &env->bpf_progs.infos_cnt,
977 ret = do_write(ff, node->info_linear, len);
991 static int write_bpf_prog_info(struct feat_fd *ff __maybe_unused,
998 static int write_bpf_btf(struct feat_fd *ff,
1001 struct perf_env *env = &ff->ph->env;
1008 ret = do_write(ff, &env->bpf_progs.btfs_cnt,
1021 ret = do_write(ff, &node->id,
1163 static int write_cache(struct feat_fd *ff,
1177 ret = do_write(ff, &version, sizeof(u32));
1181 ret = do_write(ff, &cnt, sizeof(u32));
1189 ret = do_write(ff, &c->v, sizeof(u32)); \
1200 ret = do_write_string(ff, (const char *) c->v); \
1216 static int write_stat(struct feat_fd *ff __maybe_unused,
1222 static int write_sample_time(struct feat_fd *ff,
1227 ret = do_write(ff, &evlist->first_sample_time,
1232 return do_write(ff, &evlist->last_sample_time,
1358 static int write_mem_topology(struct feat_fd *ff __maybe_unused,
1374 ret = do_write(ff, &version, sizeof(version));
1378 ret = do_write(ff, &bsize, sizeof(bsize));
1382 ret = do_write(ff, &nr, sizeof(nr));
1390 ret = do_write(ff, &n->v, sizeof(n->v)); \
1399 ret = do_write_bitmap(ff, n->set, n->size);
1408 static int write_compressed(struct feat_fd *ff __maybe_unused,
1413 ret = do_write(ff, &(ff->ph->env.comp_ver), sizeof(ff->ph->env.comp_ver));
1417 ret = do_write(ff, &(ff->ph->env.comp_type), sizeof(ff->ph->env.comp_type));
1421 ret = do_write(ff, &(ff->ph->env.comp_level), sizeof(ff->ph->env.comp_level));
1425 ret = do_write(ff, &(ff->ph->env.comp_ratio), sizeof(ff->ph->env.comp_ratio));
1429 return do_write(ff, &(ff->ph->env.comp_mmap_len), sizeof(ff->ph->env.comp_mmap_len));
1432 static int write_cpu_pmu_caps(struct feat_fd *ff,
1447 ret = do_write(ff, &nr_caps, sizeof(nr_caps));
1452 ret = do_write_string(ff, caps->name);
1456 ret = do_write_string(ff, caps->value);
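
Every feature writer from write_hostname() down to write_cpu_pmu_caps() has the same shape: a plain sequence of do_write()/do_write_string() calls against the feat_fd, returning the first error. A hypothetical minimal writer in that style; the function name and payload are invented for illustration, and the sketch relies on header.c's own helpers and types rather than being standalone:

static int write_example(struct feat_fd *ff,
			 struct evlist *evlist __maybe_unused)
{
	u64 value = 42;			/* invented payload */
	int ret;

	ret = do_write(ff, &value, sizeof(value));
	if (ret < 0)
		return ret;

	return do_write_string(ff, "example");
}
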
1464 static void print_hostname(struct feat_fd *ff, FILE *fp)
1466 fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname);
1469 static void print_osrelease(struct feat_fd *ff, FILE *fp)
1471 fprintf(fp, "# os release : %s\n", ff->ph->env.os_release);
1474 static void print_arch(struct feat_fd *ff, FILE *fp)
1476 fprintf(fp, "# arch : %s\n", ff->ph->env.arch);
1479 static void print_cpudesc(struct feat_fd *ff, FILE *fp)
1481 fprintf(fp, "# cpudesc : %s\n", ff->ph->env.cpu_desc);
1484 static void print_nrcpus(struct feat_fd *ff, FILE *fp)
1486 fprintf(fp, "# nrcpus online : %u\n", ff->ph->env.nr_cpus_online);
1487 fprintf(fp, "# nrcpus avail : %u\n", ff->ph->env.nr_cpus_avail);
1490 static void print_version(struct feat_fd *ff, FILE *fp)
1492 fprintf(fp, "# perf version : %s\n", ff->ph->env.version);
1495 static void print_cmdline(struct feat_fd *ff, FILE *fp)
1499 nr = ff->ph->env.nr_cmdline;
1504 char *argv_i = strdup(ff->ph->env.cmdline_argv[i]);
1506 fprintf(fp, "%s ", ff->ph->env.cmdline_argv[i]);
1524 static void print_cpu_topology(struct feat_fd *ff, FILE *fp)
1526 struct perf_header *ph = ff->ph;
1581 static void print_clockid(struct feat_fd *ff, FILE *fp)
1584 ff->ph->env.clock.clockid_res_ns * 1000);
1587 static void print_clock_data(struct feat_fd *ff, FILE *fp)
1596 if (!ff->ph->env.clock.enabled) {
1602 ref = ff->ph->env.clock.tod_ns;
1608 ref = ff->ph->env.clock.clockid_ns;
1613 clockid = ff->ph->env.clock.clockid;
1630 static void print_dir_format(struct feat_fd *ff, FILE *fp)
1635 session = container_of(ff->ph, struct perf_session, header);
1641 static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
1643 struct perf_env *env = &ff->ph->env;
1665 static void print_bpf_btf(struct feat_fd *ff, FILE *fp)
1667 struct perf_env *env = &ff->ph->env;
1736 static struct evsel *read_event_desc(struct feat_fd *ff)
1745 if (do_read_u32(ff, &nre))
1748 if (do_read_u32(ff, &sz))
1772 if (__do_read(ff, buf, sz))
1775 if (ff->ph->needs_swap)
1783 if (do_read_u32(ff, &nr))
1786 if (ff->ph->needs_swap)
1789 evsel->name = do_read_string(ff);
1803 if (do_read_u64(ff, id))
1823 static void print_event_desc(struct feat_fd *ff, FILE *fp)
1829 if (ff->events)
1830 events = ff->events;
1832 events = read_event_desc(ff);
1858 ff->events = NULL;
1861 static void print_total_mem(struct feat_fd *ff, FILE *fp)
1863 fprintf(fp, "# total memory : %llu kB\n", ff->ph->env.total_mem);
1866 static void print_numa_topology(struct feat_fd *ff, FILE *fp)
1871 for (i = 0; i < ff->ph->env.nr_numa_nodes; i++) {
1872 n = &ff->ph->env.numa_nodes[i];
1883 static void print_cpuid(struct feat_fd *ff, FILE *fp)
1885 fprintf(fp, "# cpuid : %s\n", ff->ph->env.cpuid);
1888 static void print_branch_stack(struct feat_fd *ff __maybe_unused, FILE *fp)
1893 static void print_auxtrace(struct feat_fd *ff __maybe_unused, FILE *fp)
1898 static void print_stat(struct feat_fd *ff __maybe_unused, FILE *fp)
1903 static void print_cache(struct feat_fd *ff, FILE *fp __maybe_unused)
1908 for (i = 0; i < ff->ph->env.caches_cnt; i++) {
1910 cpu_cache_level__fprintf(fp, &ff->ph->env.caches[i]);
1914 static void print_compressed(struct feat_fd *ff, FILE *fp)
1917 ff->ph->env.comp_type == PERF_COMP_ZSTD ? "Zstd" : "Unknown",
1918 ff->ph->env.comp_level, ff->ph->env.comp_ratio);
1921 static void print_cpu_pmu_caps(struct feat_fd *ff, FILE *fp)
1924 u32 nr_caps = ff->ph->env.nr_cpu_pmu_caps;
1932 str = ff->ph->env.cpu_pmu_caps;
1942 static void print_pmu_mappings(struct feat_fd *ff, FILE *fp)
1949 pmu_num = ff->ph->env.nr_pmu_mappings;
1955 str = ff->ph->env.pmu_mappings;
1978 static void print_group_desc(struct feat_fd *ff, FILE *fp)
1984 session = container_of(ff->ph, struct perf_session, header);
2000 static void print_sample_time(struct feat_fd *ff, FILE *fp)
2006 session = container_of(ff->ph, struct perf_session, header);
2035 static void print_mem_topology(struct feat_fd *ff, FILE *fp)
2040 nodes = ff->ph->env.memory_nodes;
2041 nr = ff->ph->env.nr_memory_nodes;
2044 nr, ff->ph->env.memory_bsize);
2047 memory_node__fprintf(&nodes[i], ff->ph->env.memory_bsize, fp);
2212 static int process_##__feat(struct feat_fd *ff, void *data __maybe_unused) \
2214 ff->ph->env.__feat_env = do_read_string(ff); \
2215 return ff->ph->env.__feat_env ? 0 : -ENOMEM; \
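
The three lines above are the body of the macro that generates the trivial string-feature readers (hostname, os release, arch, version, and so on): each one stores the result of do_read_string() into the matching perf_env field. Expanded for the hostname feature it would look roughly like the sketch below; the expansion is illustrative, relies on header.c's types, and omits any extra bookkeeping the real macro may do.

/* Illustrative expansion of the string-feature reader macro for one
 * feature; the hostname field is confirmed by print_hostname() above. */
static int process_hostname(struct feat_fd *ff, void *data __maybe_unused)
{
	ff->ph->env.hostname = do_read_string(ff);
	return ff->ph->env.hostname ? 0 : -ENOMEM;
}
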
2225 static int process_tracing_data(struct feat_fd *ff, void *data)
2227 ssize_t ret = trace_report(ff->fd, data, false);
2232 static int process_build_id(struct feat_fd *ff, void *data __maybe_unused)
2234 if (perf_header__read_build_ids(ff->ph, ff->fd, ff->offset, ff->size))
2239 static int process_nrcpus(struct feat_fd *ff, void *data __maybe_unused)
2244 ret = do_read_u32(ff, &nr_cpus_avail);
2248 ret = do_read_u32(ff, &nr_cpus_online);
2251 ff->ph->env.nr_cpus_avail = (int)nr_cpus_avail;
2252 ff->ph->env.nr_cpus_online = (int)nr_cpus_online;
2256 static int process_total_mem(struct feat_fd *ff, void *data __maybe_unused)
2261 ret = do_read_u64(ff, &total_mem);
2264 ff->ph->env.total_mem = (unsigned long long)total_mem;
2301 process_event_desc(struct feat_fd *ff, void *data __maybe_unused)
2304 struct evsel *evsel, *events = read_event_desc(ff);
2309 session = container_of(ff->ph, struct perf_session, header);
2314 ff->events = events;
2326 static int process_cmdline(struct feat_fd *ff, void *data __maybe_unused)
2331 if (do_read_u32(ff, &nr))
2334 ff->ph->env.nr_cmdline = nr;
2336 cmdline = zalloc(ff->size + nr + 1);
2345 str = do_read_string(ff);
2354 ff->ph->env.cmdline = cmdline;
2355 ff->ph->env.cmdline_argv = (const char **) argv;
2364 static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
2369 int cpu_nr = ff->ph->env.nr_cpus_avail;
2371 struct perf_header *ph = ff->ph;
2378 if (do_read_u32(ff, &nr))
2387 str = do_read_string(ff);
2399 if (do_read_u32(ff, &nr))
2406 str = do_read_string(ff);
2422 if (ff->size <= size) {
2437 if (do_read_u32(ff, &nr))
2443 if (do_read_u32(ff, &nr))
2460 if (ff->size <= size)
2463 if (do_read_u32(ff, &nr))
2470 str = do_read_string(ff);
2483 if (do_read_u32(ff, &nr))
2498 static int process_numa_topology(struct feat_fd *ff, void *data __maybe_unused)
2505 if (do_read_u32(ff, &nr))
2516 if (do_read_u32(ff, &n->node))
2519 if (do_read_u64(ff, &n->mem_total))
2522 if (do_read_u64(ff, &n->mem_free))
2525 str = do_read_string(ff);
2535 ff->ph->env.nr_numa_nodes = nr;
2536 ff->ph->env.numa_nodes = nodes;
2544 static int process_pmu_mappings(struct feat_fd *ff, void *data __maybe_unused)
2551 if (do_read_u32(ff, &pmu_num))
2559 ff->ph->env.nr_pmu_mappings = pmu_num;
2564 if (do_read_u32(ff, &type))
2567 name = do_read_string(ff);
2578 ff->ph->env.msr_pmu_type = type;
2583 ff->ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
2591 static int process_group_desc(struct feat_fd *ff, void *data __maybe_unused)
2603 if (do_read_u32(ff, &nr_groups))
2606 ff->ph->env.nr_groups = nr_groups;
2617 desc[i].name = do_read_string(ff);
2621 if (do_read_u32(ff, &desc[i].leader_idx))
2624 if (do_read_u32(ff, &desc[i].nr_members))
2631 session = container_of(ff->ph, struct perf_session, header);
2675 static int process_auxtrace(struct feat_fd *ff, void *data __maybe_unused)
2680 session = container_of(ff->ph, struct perf_session, header);
2682 err = auxtrace_index__process(ff->fd, ff->size, session,
2683 ff->ph->needs_swap);
2689 static int process_cache(struct feat_fd *ff, void *data __maybe_unused)
2694 if (do_read_u32(ff, &version))
2700 if (do_read_u32(ff, &cnt))
2711 if (do_read_u32(ff, &c.v))\
2721 c.v = do_read_string(ff); \
2733 ff->ph->env.caches = caches;
2734 ff->ph->env.caches_cnt = cnt;
2741 static int process_sample_time(struct feat_fd *ff, void *data __maybe_unused)
2747 session = container_of(ff->ph, struct perf_session, header);
2749 ret = do_read_u64(ff, &first_sample_time);
2753 ret = do_read_u64(ff, &last_sample_time);
2762 static int process_mem_topology(struct feat_fd *ff,
2769 if (do_read_u64(ff, &version))
2775 if (do_read_u64(ff, &bsize))
2778 if (do_read_u64(ff, &nr))
2789 if (do_read_u64(ff, &n.v)) \
2797 if (do_read_bitmap(ff, &n.set, &n.size))
2803 ff->ph->env.memory_bsize = bsize;
2804 ff->ph->env.memory_nodes = nodes;
2805 ff->ph->env.nr_memory_nodes = nr;
2814 static int process_clockid(struct feat_fd *ff,
2817 if (do_read_u64(ff, &ff->ph->env.clock.clockid_res_ns))
2823 static int process_clock_data(struct feat_fd *ff,
2830 if (do_read_u32(ff, &data32))
2837 if (do_read_u32(ff, &data32))
2840 ff->ph->env.clock.clockid = data32;
2843 if (do_read_u64(ff, &data64))
2846 ff->ph->env.clock.tod_ns = data64;
2849 if (do_read_u64(ff, &data64))
2852 ff->ph->env.clock.clockid_ns = data64;
2853 ff->ph->env.clock.enabled = true;
2857 static int process_dir_format(struct feat_fd *ff,
2863 session = container_of(ff->ph, struct perf_session, header);
2869 return do_read_u64(ff, &data->dir.version);
2873 static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
2877 struct perf_env *env = &ff->ph->env;
2881 if (ff->ph->needs_swap) {
2886 if (do_read_u32(ff, &count))
2896 if (do_read_u32(ff, &info_len))
2898 if (do_read_u32(ff, &data_len))
2912 if (do_read_u64(ff, (u64 *)(&info_linear->arrays)))
2914 if (__do_read(ff, &info_linear->info, info_len))
2920 if (__do_read(ff, info_linear->data, data_len))
2942 static int process_bpf_prog_info(struct feat_fd *ff __maybe_unused, void *data __maybe_unused)
2948 static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
2950 struct perf_env *env = &ff->ph->env;
2955 if (ff->ph->needs_swap) {
2960 if (do_read_u32(ff, &count))
2968 if (do_read_u32(ff, &id))
2970 if (do_read_u32(ff, &data_size))
2980 if (__do_read(ff, node->data, data_size))
2994 static int process_compressed(struct feat_fd *ff,
2997 if (do_read_u32(ff, &(ff->ph->env.comp_ver)))
3000 if (do_read_u32(ff, &(ff->ph->env.comp_type)))
3003 if (do_read_u32(ff, &(ff->ph->env.comp_level)))
3006 if (do_read_u32(ff, &(ff->ph->env.comp_ratio)))
3009 if (do_read_u32(ff, &(ff->ph->env.comp_mmap_len)))
3015 static int process_cpu_pmu_caps(struct feat_fd *ff,
3022 if (do_read_u32(ff, &nr_caps))
3030 ff->ph->env.nr_cpu_pmu_caps = nr_caps;
3036 name = do_read_string(ff);
3040 value = do_read_string(ff);
3052 ff->ph->env.max_branches = atoi(value);
3057 ff->ph->env.cpu_pmu_caps = strbuf_detach(&sb, NULL);
3140 struct feat_fd ff;
3154 ff = (struct feat_fd) {
3160 feat_ops[feat].print(&ff, hd->fp);
3208 static int do_write_feat(struct feat_fd *ff, int type,
3215 if (perf_header__has_feat(ff->ph, type)) {
3219 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
3222 (*p)->offset = lseek(ff->fd, 0, SEEK_CUR);
3224 err = feat_ops[type].write(ff, evlist);
3229 lseek(ff->fd, (*p)->offset, SEEK_SET);
3233 (*p)->size = lseek(ff->fd, 0, SEEK_CUR) - (*p)->offset;
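
do_write_feat() brackets each feature callback with offset/size bookkeeping: note where the section starts, let the callback write its payload, rewind on failure, and derive the section size from the new file position. A self-contained sketch of that bracket; the struct and function names stand in for perf's file-section type and are not the real API.

#include <stdint.h>
#include <unistd.h>

struct sketch_section {
	uint64_t offset;
	uint64_t size;
};

static int sketch_write_one_feat(int fd, struct sketch_section *sec,
				 int (*write_cb)(int fd))
{
	sec->offset = (uint64_t)lseek(fd, 0, SEEK_CUR);

	if (write_cb(fd) < 0) {
		lseek(fd, (off_t)sec->offset, SEEK_SET);	/* undo partial write */
		return -1;
	}

	sec->size = (uint64_t)lseek(fd, 0, SEEK_CUR) - sec->offset;
	return 0;
}
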
3243 struct feat_fd ff;
3250 ff = (struct feat_fd){
3269 if (do_write_feat(&ff, feat, &p, evlist))
3278 err = do_write(&ff, feat_sec, sec_size);
3288 struct feat_fd ff;
3291 ff = (struct feat_fd){ .fd = fd };
3298 err = do_write(&ff, &f_header, sizeof(f_header));
3315 struct feat_fd ff;
3319 ff = (struct feat_fd){ .fd = fd};
3324 err = do_write(&ff, evsel->core.id, evsel->core.ids * sizeof(u64));
3331 attr_offset = lseek(ff.fd, 0, SEEK_CUR);
3341 err = do_write(&ff, &f_attr, sizeof(f_attr));
3376 err = do_write(&ff, &f_header, sizeof(f_header));
3658 struct feat_fd ff = {
3676 if (repipe && do_write(&ff, header, sizeof(*header)) < 0)
3909 struct feat_fd ff = { .fd = 0 };
3926 ff.buf = (void *)fe->data;
3927 ff.size = event->header.size - sizeof(*fe);
3928 ff.ph = &session->header;
3930 if (feat_ops[feat].process(&ff, NULL))
3938 feat_ops[feat].print(&ff, stdout);
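
The last group is the pipe-mode consumer side: a synthesized feature event carries the feature payload inline, so the feat_fd is simply aimed at that payload (buf, size, ph) and the per-feature process and print callbacks consume it through the buffered read path shown at the top. A compressed illustration of that wiring; it relies on perf's internal struct feat_fd, feat_ops[] table, and perf_session, the parameter names are invented, and the real handler does additional validation not shown here.

static int sketch_process_feature(struct perf_session *session,
				  void *payload, size_t payload_len, int feat)
{
	struct feat_fd ff = { .fd = 0 };

	ff.buf  = payload;			/* point at the inline payload  */
	ff.size = payload_len;
	ff.ph   = &session->header;

	if (feat_ops[feat].process(&ff, NULL))
		return -1;

	if (feat_ops[feat].print)		/* guard is illustrative */
		feat_ops[feat].print(&ff, stdout);

	return 0;
}
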