/*
 * Copyright © 2020 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <inttypes.h>

#include "util/list.h"
#include "util/u_debug.h"
#include "util/u_inlines.h"
#include "util/u_fifo.h"
#include "util/u_vector.h"

#include "u_trace.h"

#define __NEEDS_TRACE_PRIV
#include "u_trace_priv.h"

#define PAYLOAD_BUFFER_SIZE 0x100
#define TIMESTAMP_BUF_SIZE 0x1000
#define TRACES_PER_CHUNK   (TIMESTAMP_BUF_SIZE / sizeof(uint64_t))

bool ut_trace_instrument;

#ifdef HAVE_PERFETTO
int ut_perfetto_enabled;

/**
 * Global list of contexts, so we can defer starting the queue until
 * perfetto tracing is started.
 *
 * TODO locking
 */
struct list_head ctx_list = { &ctx_list, &ctx_list };
#endif

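/**
 * A refcounted buffer that payloads for individual trace events are
 * sub-allocated from.  Events hold a pointer into the buffer, so the
 * buffer is only freed once every chunk referencing it has been freed.
 */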
struct u_trace_payload_buf {
   uint32_t refcount;

   uint8_t *buf;
   uint8_t *next;
   uint8_t *end;
};

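/**
 * A single logged trace event: the tracepoint that was hit plus a pointer
 * to its (optional) payload, sub-allocated from a u_trace_payload_buf.
 */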
struct u_trace_event {
   const struct u_tracepoint *tp;
   const void *payload;
};

/**
 * A "chunk" of trace-events and corresponding timestamp buffer.  As
 * trace events are emitted, additional trace chunks will be allocated
 * as needed.  When u_trace_flush() is called, they are transferred
 * from the u_trace to the u_trace_context queue.
 */
struct u_trace_chunk {
   struct list_head node;

   struct u_trace_context *utctx;

   /* The number of traces this chunk contains so far: */
   unsigned num_traces;

   /* table of trace events: */
   struct u_trace_event traces[TRACES_PER_CHUNK];

   /* table of driver recorded 64b timestamps, index matches index
    * into traces table
    */
   void *timestamps;

   /* Array of u_trace_payload_buf referenced by traces[] elements.
    */
   struct u_vector payloads;

   /* Current payload buffer being written. */
   struct u_trace_payload_buf *payload;

   struct util_queue_fence fence;

   bool last;          /* this chunk is last in batch */
   bool eof;           /* this chunk is last in frame */

   void *flush_data; /* assigned by u_trace_flush */

   /**
    * Several chunks reference a single flush_data instance thus only
    * one chunk should be designated to free the data.
    */
   bool free_flush_data;
};

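/**
 * Output backend for processed traces.  One implementation prints plain
 * text, the other JSON; which one is used is selected by the
 * GPU_TRACE_FORMAT environment variable in u_trace_context_init().
 */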
struct u_trace_printer {
   void (*start)(struct u_trace_context *utctx);
   void (*end)(struct u_trace_context *utctx);
   void (*start_of_frame)(struct u_trace_context *utctx);
   void (*end_of_frame)(struct u_trace_context *utctx);
   void (*start_of_batch)(struct u_trace_context *utctx);
   void (*end_of_batch)(struct u_trace_context *utctx);
   void (*event)(struct u_trace_context *utctx,
                 struct u_trace_chunk *chunk,
                 const struct u_trace_event *evt,
                 uint64_t ns, int32_t delta);
};

static void
print_txt_start(struct u_trace_context *utctx)
{
}

static void
print_txt_end_of_frame(struct u_trace_context *utctx)
{
   fprintf(utctx->out, "END OF FRAME %u\n", utctx->frame_nr);
}

static void
print_txt_start_of_batch(struct u_trace_context *utctx)
{
   fprintf(utctx->out, "+----- NS -----+ +-- Δ --+  +----- MSG -----\n");
}

static void
print_txt_end_of_batch(struct u_trace_context *utctx)
{
   uint64_t elapsed = utctx->last_time_ns - utctx->first_time_ns;
   fprintf(utctx->out, "ELAPSED: %"PRIu64" ns\n", elapsed);
}

static void
print_txt_event(struct u_trace_context *utctx,
                struct u_trace_chunk *chunk,
                const struct u_trace_event *evt,
                uint64_t ns, int32_t delta)
{
   if (evt->tp->print) {
      fprintf(utctx->out, "%016"PRIu64" %+9d: %s: ", ns, delta, evt->tp->name);
      evt->tp->print(utctx->out, evt->payload);
   } else {
      fprintf(utctx->out, "%016"PRIu64" %+9d: %s\n", ns, delta, evt->tp->name);
   }
}

/* print_txt_start() is an empty no-op, reused for the hooks that emit no
 * text output:
 */
static struct u_trace_printer txt_printer = {
   .start = &print_txt_start,
   .end = &print_txt_start,
   .start_of_frame = &print_txt_start,
   .end_of_frame = &print_txt_end_of_frame,
   .start_of_batch = &print_txt_start_of_batch,
   .end_of_batch = &print_txt_end_of_batch,
   .event = &print_txt_event,
};

static void
print_json_start(struct u_trace_context *utctx)
{
   fprintf(utctx->out, "[\n");
}

static void
print_json_end(struct u_trace_context *utctx)
{
   fprintf(utctx->out, "\n]");
}

static void
print_json_start_of_frame(struct u_trace_context *utctx)
{
   if (utctx->frame_nr != 0)
      fprintf(utctx->out, ",\n");
   fprintf(utctx->out, "{\n\"frame\": %u,\n", utctx->frame_nr);
   fprintf(utctx->out, "\"batches\": [\n");
}

static void
print_json_end_of_frame(struct u_trace_context *utctx)
{
   fprintf(utctx->out, "]\n}\n");
   fflush(utctx->out);
}

static void
print_json_start_of_batch(struct u_trace_context *utctx)
{
   if (utctx->batch_nr != 0)
      fprintf(utctx->out, ",\n");
   fprintf(utctx->out, "{\n\"events\": [\n");
}

static void
print_json_end_of_batch(struct u_trace_context *utctx)
{
   uint64_t elapsed = utctx->last_time_ns - utctx->first_time_ns;
   fprintf(utctx->out, "],\n");
   fprintf(utctx->out, "\"duration_ns\": %"PRIu64"\n", elapsed);
   fprintf(utctx->out, "}\n");
}

static void
print_json_event(struct u_trace_context *utctx,
                 struct u_trace_chunk *chunk,
                 const struct u_trace_event *evt,
                 uint64_t ns, int32_t delta)
{
   if (utctx->event_nr != 0)
      fprintf(utctx->out, ",\n");
   fprintf(utctx->out, "{\n\"event\": \"%s\",\n", evt->tp->name);
   fprintf(utctx->out, "\"time_ns\": \"%016"PRIu64"\",\n", ns);
   fprintf(utctx->out, "\"params\": {");
   if (evt->tp->print_json)
      evt->tp->print_json(utctx->out, evt->payload);
   fprintf(utctx->out, "}\n}\n");
}

static struct u_trace_printer json_printer = {
   .start = &print_json_start,
   .end = &print_json_end,
   .start_of_frame = &print_json_start_of_frame,
   .end_of_frame = &print_json_end_of_frame,
   .start_of_batch = &print_json_start_of_batch,
   .end_of_batch = &print_json_end_of_batch,
   .event = &print_json_event,
};

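/**
 * Allocate a new payload buffer (header and storage in a single
 * allocation) with a refcount of 1.  Chunks that reference the buffer
 * take additional references; it is freed when the last one is dropped.
 */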
static struct u_trace_payload_buf *
u_trace_payload_buf_create(void)
{
   struct u_trace_payload_buf *payload =
      malloc(sizeof(*payload) + PAYLOAD_BUFFER_SIZE);

   p_atomic_set(&payload->refcount, 1);

   payload->buf = (uint8_t *) (payload + 1);
   payload->end = payload->buf + PAYLOAD_BUFFER_SIZE;
   payload->next = payload->buf;

   return payload;
}

static struct u_trace_payload_buf *
u_trace_payload_buf_ref(struct u_trace_payload_buf *payload)
{
   p_atomic_inc(&payload->refcount);
   return payload;
}

static void
u_trace_payload_buf_unref(struct u_trace_payload_buf *payload)
{
   if (p_atomic_dec_zero(&payload->refcount))
      free(payload);
}

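/* Free a single chunk: release its timestamp buffer via the driver
 * callback, drop the references it holds on payload buffers, and unlink
 * it from whichever list it is on.
 */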
static void
free_chunk(void *ptr)
{
   struct u_trace_chunk *chunk = ptr;

   chunk->utctx->delete_timestamp_buffer(chunk->utctx, chunk->timestamps);

   /* Unref payloads attached to this chunk. */
   struct u_trace_payload_buf **payload;
   u_vector_foreach(payload, &chunk->payloads)
      u_trace_payload_buf_unref(*payload);
   u_vector_finish(&chunk->payloads);

   list_del(&chunk->node);
   free(chunk);
}

static void
free_chunks(struct list_head *chunks)
{
   while (!list_is_empty(chunks)) {
      struct u_trace_chunk *chunk = list_first_entry(chunks,
            struct u_trace_chunk, node);
      free_chunk(chunk);
   }
}

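/* Return a chunk with room for one more trace event and (at least)
 * payload_size bytes of payload space, appending a new chunk to the
 * u_trace if the current one is full.
 */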
static struct u_trace_chunk *
get_chunk(struct u_trace *ut, size_t payload_size)
{
   struct u_trace_chunk *chunk;

   assert(payload_size <= PAYLOAD_BUFFER_SIZE);

   /* do we currently have a non-full chunk to append msgs to? */
   if (!list_is_empty(&ut->trace_chunks)) {
      chunk = list_last_entry(&ut->trace_chunks,
                              struct u_trace_chunk, node);
      /* Can we store a new trace in the chunk? */
      if (chunk->num_traces < TRACES_PER_CHUNK) {
         /* If no payload required, nothing else to check. */
         if (payload_size <= 0)
            return chunk;

         /* If the payload buffer has space for the payload, we're good.
          */
         if (chunk->payload &&
             (chunk->payload->end - chunk->payload->next) >= payload_size)
            return chunk;

         /* If we don't have enough space in the payload buffer, can we
          * allocate a new one?
          */
         struct u_trace_payload_buf **buf = u_vector_add(&chunk->payloads);
         *buf = u_trace_payload_buf_create();
         chunk->payload = *buf;
         return chunk;
      }
      /* we need to expand to add another chunk to the batch, so
       * the current one is no longer the last one of the batch:
       */
      chunk->last = false;
   }

   /* .. if not, then create a new one: */
   chunk = calloc(1, sizeof(*chunk));

   chunk->utctx = ut->utctx;
   chunk->timestamps = ut->utctx->create_timestamp_buffer(ut->utctx, TIMESTAMP_BUF_SIZE);
   chunk->last = true;
   u_vector_init(&chunk->payloads, 4, sizeof(struct u_trace_payload_buf *));
   if (payload_size > 0) {
      struct u_trace_payload_buf **buf = u_vector_add(&chunk->payloads);
      *buf = u_trace_payload_buf_create();
      chunk->payload = *buf;
   }

   list_addtail(&chunk->node, &ut->trace_chunks);

   return chunk;
}

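/* Environment variables controlling tracing:
 *
 *  GPU_TRACE_INSTRUMENT - enable instrumentation even if no text output
 *                         is requested
 *  GPU_TRACE            - print traces to stdout
 *  GPU_TRACEFILE        - print traces to the given file instead
 *  GPU_TRACE_FORMAT     - "txt" (default) or "json"
 */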
DEBUG_GET_ONCE_BOOL_OPTION(trace_instrument, "GPU_TRACE_INSTRUMENT", false)
DEBUG_GET_ONCE_BOOL_OPTION(trace, "GPU_TRACE", false)
DEBUG_GET_ONCE_FILE_OPTION(trace_file, "GPU_TRACEFILE", NULL, "w")
DEBUG_GET_ONCE_OPTION(trace_format, "GPU_TRACE_FORMAT", "txt")

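/* Resolve the output FILE* once, on first use: GPU_TRACEFILE wins,
 * otherwise GPU_TRACE selects stdout, otherwise there is no text output.
 */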
static FILE *
get_tracefile(void)
{
   static FILE *tracefile = NULL;
   static bool firsttime = true;

   if (firsttime) {
      tracefile = debug_get_option_trace_file();
      if (!tracefile && debug_get_option_trace()) {
         tracefile = stdout;
      }

      ut_trace_instrument = debug_get_option_trace_instrument();

      firsttime = false;
   }

   return tracefile;
}

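/* Lazily create the single-threaded queue that trace chunks are processed
 * on, so that no thread is spawned unless tracing is actually active.
 */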
static void
queue_init(struct u_trace_context *utctx)
{
   if (utctx->queue.jobs)
      return;

   bool ret = util_queue_init(&utctx->queue, "traceq", 256, 1,
                              UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY |
                              UTIL_QUEUE_INIT_RESIZE_IF_FULL, NULL);
   assert(ret);

   if (!ret)
      utctx->out = NULL;
}

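/**
 * Initialize a trace context with the driver-provided callbacks for
 * creating/deleting timestamp buffers and recording/reading timestamps.
 *
 * A rough sketch of how a driver might wire this up (hypothetical my_*
 * helpers, shown only for illustration):
 *
 *    u_trace_context_init(&screen->trace_ctx, screen,
 *                         my_create_ts_buffer,   // allocs GPU-visible buffer
 *                         my_delete_ts_buffer,
 *                         my_record_ts,          // emits timestamp write into cs
 *                         my_read_ts,            // waits + converts to ns on CPU
 *                         my_delete_flush_data);
 *
 * Per-batch u_trace instances are then initialized with u_trace_init()
 * and handed back to the context via u_trace_flush() /
 * u_trace_context_process().
 */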
void
u_trace_context_init(struct u_trace_context *utctx,
      void *pctx,
      u_trace_create_ts_buffer  create_timestamp_buffer,
      u_trace_delete_ts_buffer  delete_timestamp_buffer,
      u_trace_record_ts         record_timestamp,
      u_trace_read_ts           read_timestamp,
      u_trace_delete_flush_data delete_flush_data)
{
   utctx->pctx = pctx;
   utctx->create_timestamp_buffer = create_timestamp_buffer;
   utctx->delete_timestamp_buffer = delete_timestamp_buffer;
   utctx->record_timestamp = record_timestamp;
   utctx->read_timestamp = read_timestamp;
   utctx->delete_flush_data = delete_flush_data;

   utctx->last_time_ns = 0;
   utctx->first_time_ns = 0;
   utctx->frame_nr = 0;
   utctx->batch_nr = 0;
   utctx->event_nr = 0;
   utctx->start_of_frame = true;

   list_inithead(&utctx->flushed_trace_chunks);

   utctx->out = get_tracefile();

   const char *trace_format = debug_get_option_trace_format();
   if (strcmp(trace_format, "json") == 0) {
      utctx->out_printer = &json_printer;
   } else {
      utctx->out_printer = &txt_printer;
   }

#ifdef HAVE_PERFETTO
   list_add(&utctx->node, &ctx_list);
#endif

   if (!u_trace_context_actively_tracing(utctx))
      return;

   queue_init(utctx);

   if (utctx->out) {
      utctx->out_printer->start(utctx);
   }
}

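/* Tear down the context: emit the printer's end hook, drain and destroy
 * the processing queue, and free any chunks that were never processed.
 */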
void
u_trace_context_fini(struct u_trace_context *utctx)
{
#ifdef HAVE_PERFETTO
   list_del(&utctx->node);
#endif

   if (utctx->out) {
      utctx->out_printer->end(utctx);
      fflush(utctx->out);
   }

   if (!utctx->queue.jobs)
      return;
   util_queue_finish(&utctx->queue);
   util_queue_destroy(&utctx->queue);
   free_chunks(&utctx->flushed_trace_chunks);
}

#ifdef HAVE_PERFETTO
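/* Called when a perfetto tracing session is started/stopped.  Starting a
 * session spins up the processing queue for every registered context.
 */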
void
u_trace_perfetto_start(void)
{
   list_for_each_entry (struct u_trace_context, utctx, &ctx_list, node)
      queue_init(utctx);
   ut_perfetto_enabled++;
}

void
u_trace_perfetto_stop(void)
{
   assert(ut_perfetto_enabled > 0);
   ut_perfetto_enabled--;
}
#endif

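/* Queue job that turns one flushed chunk into output: read back the
 * timestamp for each event, compute deltas, and hand the event to the
 * text/JSON printer and (if enabled) the perfetto callback.
 */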
static void
process_chunk(void *job, void *gdata, int thread_index)
{
   struct u_trace_chunk *chunk = job;
   struct u_trace_context *utctx = chunk->utctx;

   if (utctx->start_of_frame) {
      utctx->start_of_frame = false;
      utctx->batch_nr = 0;
      if (utctx->out) {
         utctx->out_printer->start_of_frame(utctx);
      }
   }

   /* For the first chunk of a batch, accumulated times will be zeroed: */
   if (!utctx->last_time_ns) {
      utctx->event_nr = 0;
      if (utctx->out) {
         utctx->out_printer->start_of_batch(utctx);
      }
   }

   for (unsigned idx = 0; idx < chunk->num_traces; idx++) {
      const struct u_trace_event *evt = &chunk->traces[idx];

      if (!evt->tp)
         continue;

      uint64_t ns = utctx->read_timestamp(utctx, chunk->timestamps, idx, chunk->flush_data);
      int32_t delta;

      if (!utctx->first_time_ns)
         utctx->first_time_ns = ns;

      if (ns != U_TRACE_NO_TIMESTAMP) {
         delta = utctx->last_time_ns ? ns - utctx->last_time_ns : 0;
         utctx->last_time_ns = ns;
      } else {
         /* we skipped recording the timestamp, so it should be
          * the same as last msg:
          */
         ns = utctx->last_time_ns;
         delta = 0;
      }

      if (utctx->out) {
         utctx->out_printer->event(utctx, chunk, evt, ns, delta);
      }
#ifdef HAVE_PERFETTO
      if (evt->tp->perfetto) {
         evt->tp->perfetto(utctx->pctx, ns, chunk->flush_data, evt->payload);
      }
#endif

      utctx->event_nr++;
   }

   if (chunk->last) {
      if (utctx->out) {
         utctx->out_printer->end_of_batch(utctx);
      }

      utctx->batch_nr++;
      utctx->last_time_ns = 0;
      utctx->first_time_ns = 0;
   }

   if (chunk->eof) {
      if (utctx->out) {
         utctx->out_printer->end_of_frame(utctx);
      }
      utctx->frame_nr++;
      utctx->start_of_frame = true;
   }

   if (chunk->free_flush_data && utctx->delete_flush_data) {
      utctx->delete_flush_data(utctx, chunk->flush_data);
   }
}

static void
cleanup_chunk(void *job, void *gdata, int thread_index)
{
   free_chunk(job);
}

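/* Hand all flushed chunks to the processing queue.  'eof' marks the last
 * chunk as end-of-frame so the printer can close out the current frame.
 */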
void
u_trace_context_process(struct u_trace_context *utctx, bool eof)
{
   struct list_head *chunks = &utctx->flushed_trace_chunks;

   if (list_is_empty(chunks))
      return;

   struct u_trace_chunk *last_chunk = list_last_entry(chunks,
            struct u_trace_chunk, node);
   last_chunk->eof = eof;

   while (!list_is_empty(chunks)) {
      struct u_trace_chunk *chunk = list_first_entry(chunks,
            struct u_trace_chunk, node);

      /* remove from list before enqueuing, because chunk is freed
       * once it is processed by the queue:
       */
      list_delinit(&chunk->node);

      util_queue_add_job(&utctx->queue, chunk, &chunk->fence,
            process_chunk, cleanup_chunk,
            TIMESTAMP_BUF_SIZE);
   }
}

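/* Initialize a per-batch/per-command-buffer u_trace.  Tracing is enabled
 * here based on whether the context is currently instrumenting.
 */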
void
u_trace_init(struct u_trace *ut, struct u_trace_context *utctx)
{
   ut->utctx = utctx;
   list_inithead(&ut->trace_chunks);
   ut->enabled = u_trace_context_instrumenting(utctx);
}

void
u_trace_fini(struct u_trace *ut)
{
   /* Normally the list of trace-chunks would be empty, if they
    * have been flushed to the trace-context.
    */
   free_chunks(&ut->trace_chunks);
}

bool
u_trace_has_points(struct u_trace *ut)
{
   return !list_is_empty(&ut->trace_chunks);
}

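/* Iterators address a position within the recorded events of a u_trace
 * (a chunk plus an event index), and are used to clone or disable a
 * range of events.
 */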
struct u_trace_iterator
u_trace_begin_iterator(struct u_trace *ut)
{
   if (!ut->enabled)
      return (struct u_trace_iterator) {NULL, NULL, 0};

   if (list_is_empty(&ut->trace_chunks))
      return (struct u_trace_iterator) { ut, NULL, 0 };

   struct u_trace_chunk *first_chunk =
      list_first_entry(&ut->trace_chunks, struct u_trace_chunk, node);

   return (struct u_trace_iterator) { ut, first_chunk, 0};
}

struct u_trace_iterator
u_trace_end_iterator(struct u_trace *ut)
{
   if (!ut->enabled)
      return (struct u_trace_iterator) {NULL, NULL, 0};

   if (list_is_empty(&ut->trace_chunks))
      return (struct u_trace_iterator) { ut, NULL, 0 };

   struct u_trace_chunk *last_chunk =
      list_last_entry(&ut->trace_chunks, struct u_trace_chunk, node);

   return (struct u_trace_iterator) { ut, last_chunk, last_chunk->num_traces};
}

/* If an iterator was created when there were no chunks and there are now
 * chunks, "sanitize" it to include the first chunk.
 */
static struct u_trace_iterator
sanitize_iterator(struct u_trace_iterator iter)
{
   if (iter.ut && !iter.chunk && !list_is_empty(&iter.ut->trace_chunks)) {
      iter.chunk = list_first_entry(&iter.ut->trace_chunks, struct
                                    u_trace_chunk, node);
   }

   return iter;
}

bool
u_trace_iterator_equal(struct u_trace_iterator a,
                       struct u_trace_iterator b)
{
   a = sanitize_iterator(a);
   b = sanitize_iterator(b);
   return a.ut == b.ut &&
          a.chunk == b.chunk &&
          a.event_idx == b.event_idx;
}

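/**
 * Copy the events in [begin_it, end_it) into 'into', using the driver's
 * copy_ts_buffer callback to copy the corresponding timestamps on the
 * given cmdstream.  This is used e.g. when traces recorded in a reusable
 * command buffer need to be re-emitted on each submission.
 *
 * Typical (hypothetical) driver usage, for illustration only:
 *
 *    struct u_trace_iterator begin = u_trace_begin_iterator(&cmd->trace);
 *    struct u_trace_iterator end = u_trace_end_iterator(&cmd->trace);
 *    u_trace_clone_append(begin, end, &submit->trace, submit->cs,
 *                         my_copy_ts_buffer);
 */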
void
u_trace_clone_append(struct u_trace_iterator begin_it,
                     struct u_trace_iterator end_it,
                     struct u_trace *into,
                     void *cmdstream,
                     u_trace_copy_ts_buffer copy_ts_buffer)
{
   begin_it = sanitize_iterator(begin_it);
   end_it = sanitize_iterator(end_it);

   struct u_trace_chunk *from_chunk = begin_it.chunk;
   uint32_t from_idx = begin_it.event_idx;

   while (from_chunk != end_it.chunk || from_idx != end_it.event_idx) {
      struct u_trace_chunk *to_chunk = get_chunk(into, 0 /* payload_size */);

      unsigned to_copy = MIN2(TRACES_PER_CHUNK - to_chunk->num_traces,
                              from_chunk->num_traces - from_idx);
      if (from_chunk == end_it.chunk)
         to_copy = MIN2(to_copy, end_it.event_idx - from_idx);

      copy_ts_buffer(begin_it.ut->utctx, cmdstream,
                     from_chunk->timestamps, from_idx,
                     to_chunk->timestamps, to_chunk->num_traces,
                     to_copy);

      memcpy(&to_chunk->traces[to_chunk->num_traces],
             &from_chunk->traces[from_idx],
             to_copy * sizeof(struct u_trace_event));

      /* Take a refcount on payloads from from_chunk if needed. */
      if (begin_it.ut != into) {
         struct u_trace_payload_buf **in_payload;
         u_vector_foreach(in_payload, &from_chunk->payloads) {
            struct u_trace_payload_buf **out_payload =
               u_vector_add(&to_chunk->payloads);

            *out_payload = u_trace_payload_buf_ref(*in_payload);
         }
      }

      to_chunk->num_traces += to_copy;
      from_idx += to_copy;

      assert(from_idx <= from_chunk->num_traces);
      if (from_idx == from_chunk->num_traces) {
         if (from_chunk == end_it.chunk)
            break;

         from_idx = 0;
         from_chunk = list_entry(from_chunk->node.next, struct u_trace_chunk, node);
      }
   }
}

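/* Zero out the events in [begin_it, end_it) so they are skipped during
 * processing (process_chunk() ignores entries with a NULL tracepoint).
 */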
void
u_trace_disable_event_range(struct u_trace_iterator begin_it,
                            struct u_trace_iterator end_it)
{
   begin_it = sanitize_iterator(begin_it);
   end_it = sanitize_iterator(end_it);

   struct u_trace_chunk *current_chunk = begin_it.chunk;
   uint32_t start_idx = begin_it.event_idx;

   while (current_chunk != end_it.chunk) {
      memset(&current_chunk->traces[start_idx], 0,
             (current_chunk->num_traces - start_idx) * sizeof(struct u_trace_event));
      start_idx = 0;
      current_chunk = list_entry(current_chunk->node.next, struct u_trace_chunk, node);
   }

   memset(&current_chunk->traces[start_idx], 0,
          (end_it.event_idx - start_idx) * sizeof(struct u_trace_event));
}

/**
 * Append a trace event, returning pointer to buffer of tp->payload_sz
 * to be filled in with trace payload.  Called by generated tracepoint
 * functions.
 */
void *
u_trace_append(struct u_trace *ut, void *cs, const struct u_tracepoint *tp)
{
   struct u_trace_chunk *chunk = get_chunk(ut, tp->payload_sz);
   unsigned tp_idx = chunk->num_traces++;

   assert(tp->payload_sz == ALIGN_NPOT(tp->payload_sz, 8));

   /* sub-allocate storage for trace payload: */
   void *payload = NULL;
   if (tp->payload_sz > 0) {
      payload = chunk->payload->next;
      chunk->payload->next += tp->payload_sz;
   }

   /* record a timestamp for the trace: */
   ut->utctx->record_timestamp(ut, cs, chunk->timestamps, tp_idx, tp->end_of_pipe);

   chunk->traces[tp_idx] = (struct u_trace_event) {
         .tp = tp,
         .payload = payload,
   };

   return payload;
}

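/* Hand this u_trace's chunks over to the context.  'flush_data' is made
 * available to the read_timestamp/perfetto callbacks for every chunk; if
 * 'free_data' is set, the last chunk is responsible for freeing it via
 * the delete_flush_data callback once processing is done.
 */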
void
u_trace_flush(struct u_trace *ut, void *flush_data, bool free_data)
{
   list_for_each_entry(struct u_trace_chunk, chunk, &ut->trace_chunks, node) {
      chunk->flush_data = flush_data;
      chunk->free_flush_data = false;
   }

   if (free_data && !list_is_empty(&ut->trace_chunks)) {
      struct u_trace_chunk *last_chunk =
         list_last_entry(&ut->trace_chunks, struct u_trace_chunk, node);
      last_chunk->free_flush_data = true;
   }

   /* transfer batch's log chunks to context: */
   list_splicetail(&ut->trace_chunks, &ut->utctx->flushed_trace_chunks);
   list_inithead(&ut->trace_chunks);
}