Lines Matching defs:tidq
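These are the matches for tidq, the per-trace-ID queue pointer used throughout perf's CoreSight ETM decoder: the struct cs_etm_traceid_queue that carries the packet queue, the current and previous packet buffers, the last-branch ring buffer, and the owning thread for one trace ID. The line numbers on the left refer to the decoder source, not to this listing.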

454 struct cs_etm_traceid_queue *tidq;
459 tidq = etmq->traceid_queues[idx];
460 cs_etm__clear_packet_queue(&tidq->packet_queue);
465 struct cs_etm_traceid_queue *tidq,
472 cs_etm__clear_packet_queue(&tidq->packet_queue);
475 tidq->trace_chan_id = trace_chan_id;
476 tidq->el = tidq->prev_packet_el = ocsd_EL_unknown;
477 tidq->thread = machine__findnew_thread(&etm->session->machines.host, -1,
479 tidq->prev_packet_thread = machine__idle_thread(&etm->session->machines.host);
481 tidq->packet = zalloc(sizeof(struct cs_etm_packet));
482 if (!tidq->packet)
485 tidq->prev_packet = zalloc(sizeof(struct cs_etm_packet));
486 if (!tidq->prev_packet)
494 tidq->last_branch = zalloc(sz);
495 if (!tidq->last_branch)
497 tidq->last_branch_rb = zalloc(sz);
498 if (!tidq->last_branch_rb)
502 tidq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
503 if (!tidq->event_buf)
509 zfree(&tidq->last_branch_rb);
510 zfree(&tidq->last_branch);
511 zfree(&tidq->prev_packet);
512 zfree(&tidq->packet);
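
The matches from source lines 465-512 show how a trace-ID queue is initialised: the packet queue is cleared, the current and previous packet buffers, the two last-branch buffers, and a scratch event buffer are allocated, and any allocation failure unwinds through a single error path that zfree()s whatever was already allocated. Below is a minimal sketch of that allocate-or-unwind pattern, using plain calloc()/free() and illustrative stand-in types rather than the real perf structures:

    #include <stdlib.h>
    #include <string.h>

    /* Illustrative stand-ins for the perf structures; fields are not the real ones. */
    struct packet       { int sample_type; };
    struct branch_stack { unsigned long long nr; };

    struct traceid_queue {
            struct packet       *packet;         /* packet currently being decoded  */
            struct packet       *prev_packet;    /* packet decoded just before it   */
            struct branch_stack *last_branch;    /* linearised copy used in samples */
            struct branch_stack *last_branch_rb; /* circular buffer of branches     */
            void                *event_buf;      /* scratch buffer for perf events  */
    };

    /* Allocate everything or unwind: one error path frees whatever succeeded. */
    static int traceid_queue_init(struct traceid_queue *tidq, size_t branch_sz,
                                  size_t event_buf_sz)
    {
            memset(tidq, 0, sizeof(*tidq));      /* makes free(NULL) below safe */

            tidq->packet = calloc(1, sizeof(*tidq->packet));
            if (!tidq->packet)
                    goto out_free;

            tidq->prev_packet = calloc(1, sizeof(*tidq->prev_packet));
            if (!tidq->prev_packet)
                    goto out_free;

            tidq->last_branch = calloc(1, branch_sz);
            if (!tidq->last_branch)
                    goto out_free;

            tidq->last_branch_rb = calloc(1, branch_sz);
            if (!tidq->last_branch_rb)
                    goto out_free;

            tidq->event_buf = malloc(event_buf_sz);
            if (!tidq->event_buf)
                    goto out_free;

            return 0;

    out_free:
            free(tidq->last_branch_rb);
            free(tidq->last_branch);
            free(tidq->prev_packet);
            free(tidq->packet);
            memset(tidq, 0, sizeof(*tidq));
            return -1;
    }

Zeroing the structure up front is what lets one shared error path free every pointer unconditionally, since free(NULL) is a no-op.
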
523 struct cs_etm_traceid_queue *tidq, **traceid_queues;
542 tidq = malloc(sizeof(*tidq));
543 if (!tidq)
546 memset(tidq, 0, sizeof(*tidq));
558 if (cs_etm__init_traceid_queue(etmq, tidq, trace_chan_id))
574 traceid_queues[idx] = tidq;
585 free(tidq);
593 struct cs_etm_traceid_queue *tidq;
595 tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
596 if (tidq)
597 return &tidq->packet_queue;
603 struct cs_etm_traceid_queue *tidq)
621 tmp = tidq->packet;
622 tidq->packet = tidq->prev_packet;
623 tidq->prev_packet = tmp;
624 tidq->prev_packet_el = tidq->el;
625 thread__put(tidq->prev_packet_thread);
626 tidq->prev_packet_thread = thread__get(tidq->thread);
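
The cs_etm__packet_swap() matches around source line 621 show that once a packet has been processed, the current and previous packet buffers trade places by swapping pointers rather than copying contents, and the previous-packet slot takes a reference on the thread that was current at that point (thread__put() on the old reference, thread__get() on the new one). Here is a small sketch of the same idea, with a hypothetical refcounted thread type standing in for perf's struct thread:

    #include <stdlib.h>

    struct packet { unsigned long long start_addr, end_addr; int sample_type; };

    /* Hypothetical refcounted thread, standing in for perf's struct thread. */
    struct rthread { int refcnt; };

    static struct rthread *rthread_get(struct rthread *t)
    {
            if (t)
                    t->refcnt++;
            return t;
    }

    static void rthread_put(struct rthread *t)
    {
            if (t && --t->refcnt == 0)
                    free(t);
    }

    struct traceid_queue {
            struct packet  *packet, *prev_packet;
            struct rthread *thread, *prev_packet_thread;
    };

    /*
     * Promote the current packet to "previous" by swapping pointers, and
     * remember which thread owned it by moving a reference across.
     */
    static void packet_swap(struct traceid_queue *tidq)
    {
            struct packet *tmp = tidq->packet;

            tidq->packet      = tidq->prev_packet;
            tidq->prev_packet = tmp;

            rthread_put(tidq->prev_packet_thread);
            tidq->prev_packet_thread = rthread_get(tidq->thread);
    }

Swapping instead of copying lets the buffer that held the now-stale packet be reused for the next packet to decode.
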
792 struct cs_etm_traceid_queue *tidq;
800 tidq = etmq->traceid_queues[idx];
801 thread__zput(tidq->thread);
802 thread__zput(tidq->prev_packet_thread);
803 zfree(&tidq->event_buf);
804 zfree(&tidq->last_branch);
805 zfree(&tidq->last_branch_rb);
806 zfree(&tidq->prev_packet);
807 zfree(&tidq->packet);
808 zfree(&tidq);
952 struct cs_etm_traceid_queue *tidq;
959 tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
960 if (!tidq)
974 assert(tidq->el == ocsd_EL1 || tidq->el == ocsd_EL0);
976 assert(tidq->el == ocsd_EL2);
978 assert(tidq->el == ocsd_EL3);
981 cpumode = cs_etm__cpu_mode(etmq, address, tidq->el);
983 if (!thread__find_map(tidq->thread, cpumode, address, &al))
998 len = dso__data_read_offset(dso, maps__machine(thread__maps(tidq->thread)),
1177 struct cs_etm_traceid_queue *tidq)
1179 struct branch_stack *bs_src = tidq->last_branch_rb;
1180 struct branch_stack *bs_dst = tidq->last_branch;
1200 nr = etmq->etm->synth_opts.last_branch_sz - tidq->last_branch_pos;
1202 &bs_src->entries[tidq->last_branch_pos],
1215 sizeof(struct branch_entry) * tidq->last_branch_pos);
1220 void cs_etm__reset_last_branch_rb(struct cs_etm_traceid_queue *tidq)
1222 tidq->last_branch_pos = 0;
1223 tidq->last_branch_rb->nr = 0;
1281 struct cs_etm_traceid_queue *tidq)
1283 struct branch_stack *bs = tidq->last_branch_rb;
1292 if (!tidq->last_branch_pos)
1293 tidq->last_branch_pos = etmq->etm->synth_opts.last_branch_sz;
1295 tidq->last_branch_pos -= 1;
1297 be = &bs->entries[tidq->last_branch_pos];
1298 be->from = cs_etm__last_executed_instr(tidq->prev_packet);
1299 be->to = cs_etm__first_executed_instr(tidq->packet);
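
Source lines 1177-1299 in the matches above cover the last-branch machinery: cs_etm__update_last_branch_rb() writes the newest branch (from the last executed instruction of the previous range packet to the first instruction of the current one) into a ring buffer whose position index moves backwards with wraparound, cs_etm__copy_last_branch_rb() linearises that ring into newest-first order with at most two memcpy() calls, and cs_etm__reset_last_branch_rb() simply clears position and count. A self-contained sketch of that ring-buffer scheme follows (fixed size here; perf sizes it from synth_opts.last_branch_sz):

    #include <string.h>

    #define LAST_BRANCH_SZ 8   /* illustrative; perf takes this from synth_opts */

    struct branch_entry { unsigned long long from, to; };

    struct branch_rb {
            unsigned long long  nr;    /* number of valid entries   */
            unsigned long long  pos;   /* index of the newest entry */
            struct branch_entry entries[LAST_BRANCH_SZ];
    };

    /* The newest entry goes one slot "before" the previous newest, wrapping. */
    static void branch_rb_add(struct branch_rb *rb, unsigned long long from,
                              unsigned long long to)
    {
            if (!rb->pos)
                    rb->pos = LAST_BRANCH_SZ;
            rb->pos -= 1;

            rb->entries[rb->pos].from = from;
            rb->entries[rb->pos].to   = to;

            if (rb->nr < LAST_BRANCH_SZ)
                    rb->nr++;
    }

    /*
     * Linearise into newest-first order with at most two memcpy()s: the run
     * from pos to the end of the array, then the wrapped part from index 0.
     */
    static void branch_rb_copy(const struct branch_rb *rb,
                               struct branch_entry *dst, unsigned long long *nr)
    {
            unsigned long long tail = LAST_BRANCH_SZ - rb->pos;

            if (tail > rb->nr)
                    tail = rb->nr;

            memcpy(dst, &rb->entries[rb->pos], tail * sizeof(*dst));
            if (rb->nr > tail)
                    memcpy(dst + tail, rb->entries,
                           (rb->nr - tail) * sizeof(*dst));
            *nr = rb->nr;
    }

    static void branch_rb_reset(struct branch_rb *rb)
    {
            rb->pos = 0;
            rb->nr  = 0;
    }
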
1363 struct cs_etm_traceid_queue *tidq, pid_t tid,
1369 thread__zput(tidq->thread);
1370 tidq->thread = machine__find_thread(machine, -1, tid);
1374 if (!tidq->thread)
1375 tidq->thread = machine__idle_thread(machine);
1377 tidq->el = el;
1383 struct cs_etm_traceid_queue *tidq;
1385 tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
1386 if (!tidq)
1389 cs_etm__set_thread(etmq, tidq, tid, el);
1438 struct cs_etm_traceid_queue *tidq)
1441 struct cs_etm_packet_queue *packet_queue = &tidq->packet_queue;
1450 struct cs_etm_traceid_queue *tidq,
1455 union perf_event *event = tidq->event_buf;
1459 event->sample.header.misc = cs_etm__cpu_mode(etmq, addr, tidq->el);
1463 sample.time = cs_etm__resolve_sample_time(etmq, tidq);
1466 sample.pid = thread__pid(tidq->thread);
1467 sample.tid = thread__tid(tidq->thread);
1471 sample.cpu = tidq->packet->cpu;
1472 sample.flags = tidq->prev_packet->flags;
1475 cs_etm__copy_insn(etmq, tidq->trace_chan_id, tidq->packet, &sample);
1478 sample.branch_stack = tidq->last_branch;
1502 struct cs_etm_traceid_queue *tidq)
1507 union perf_event *event = tidq->event_buf;
1515 ip = cs_etm__last_executed_instr(tidq->prev_packet);
1519 tidq->prev_packet_el);
1523 sample.time = cs_etm__resolve_sample_time(etmq, tidq);
1526 sample.pid = thread__pid(tidq->prev_packet_thread);
1527 sample.tid = thread__tid(tidq->prev_packet_thread);
1528 sample.addr = cs_etm__first_executed_instr(tidq->packet);
1532 sample.cpu = tidq->packet->cpu;
1533 sample.flags = tidq->prev_packet->flags;
1536 cs_etm__copy_insn(etmq, tidq->trace_chan_id, tidq->prev_packet,
1686 struct cs_etm_traceid_queue *tidq)
1690 u8 trace_chan_id = tidq->trace_chan_id;
1694 instrs_prev = tidq->period_instructions;
1696 tidq->period_instructions += tidq->packet->instr_count;
1703 tidq->prev_packet->sample_type == CS_ETM_RANGE &&
1704 tidq->prev_packet->last_instr_taken_branch)
1705 cs_etm__update_last_branch_rb(etmq, tidq);
1708 tidq->period_instructions >= etm->instructions_sample_period) {
1731 * tidq->packet->instr_count
1739 * tidq->packet->instr_count represents the number of
1753 * to tidq->period_instructions for next round calculation.
1766 cs_etm__copy_last_branch_rb(etmq, tidq);
1768 while (tidq->period_instructions >=
1777 tidq->packet, offset - 1);
1779 etmq, tidq, addr,
1785 tidq->period_instructions -=
1794 if (tidq->prev_packet->sample_type == CS_ETM_DISCONTINUITY)
1798 if (tidq->prev_packet->sample_type == CS_ETM_RANGE &&
1799 tidq->prev_packet->last_instr_taken_branch)
1803 ret = cs_etm__synth_branch_sample(etmq, tidq);
1809 cs_etm__packet_swap(etm, tidq);
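
The cs_etm__sample() matches (source lines 1686-1809) show the periodic instruction sampling: each range packet's instr_count is added to period_instructions, branch records are pushed onto the last-branch ring when the previous range ended in a taken branch, and once the running count reaches the configured period the code loops, emitting one instruction sample per full period and carrying the remainder forward. The real code also computes the precise sample address inside the range; the sketch below keeps only the accumulate-and-carry arithmetic, with a printf() standing in for sample synthesis:

    #include <stdio.h>

    /* Illustrative stand-in; the real code also samples addresses in the range. */
    struct range_packet { unsigned long long instr_count; };

    struct sampler {
            unsigned long long period;              /* instructions per sample */
            unsigned long long period_instructions; /* carried-over remainder  */
    };

    /*
     * Accumulate the instructions executed in a range packet and emit one
     * sample per completed period, keeping the remainder for the next packet.
     */
    static void sample_range(struct sampler *s, const struct range_packet *pkt)
    {
            s->period_instructions += pkt->instr_count;

            while (s->period_instructions >= s->period) {
                    printf("sample: period=%llu\n", s->period);
                    s->period_instructions -= s->period;
            }
    }

    int main(void)
    {
            struct sampler s = { .period = 100, .period_instructions = 0 };
            struct range_packet p1 = { .instr_count = 250 };
            struct range_packet p2 = { .instr_count = 70 };

            sample_range(&s, &p1); /* emits 2 samples, carries 50 forward   */
            sample_range(&s, &p2); /* 50 + 70 = 120 -> emits 1 more sample  */
            return 0;
    }
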
1814 static int cs_etm__exception(struct cs_etm_traceid_queue *tidq)
1827 if (tidq->prev_packet->sample_type == CS_ETM_RANGE)
1828 tidq->prev_packet->last_instr_taken_branch = true;
1834 struct cs_etm_traceid_queue *tidq)
1840 if (tidq->prev_packet->sample_type == CS_ETM_EMPTY)
1845 tidq->prev_packet->sample_type == CS_ETM_RANGE) {
1849 cs_etm__copy_last_branch_rb(etmq, tidq);
1858 addr = cs_etm__last_executed_instr(tidq->prev_packet);
1861 etmq, tidq, addr,
1862 tidq->period_instructions);
1866 tidq->period_instructions = 0;
1871 tidq->prev_packet->sample_type == CS_ETM_RANGE) {
1872 err = cs_etm__synth_branch_sample(etmq, tidq);
1878 cs_etm__packet_swap(etm, tidq);
1882 cs_etm__reset_last_branch_rb(tidq);
1888 struct cs_etm_traceid_queue *tidq)
1903 tidq->prev_packet->sample_type == CS_ETM_RANGE) {
1907 cs_etm__copy_last_branch_rb(etmq, tidq);
1913 addr = cs_etm__last_executed_instr(tidq->prev_packet);
1916 etmq, tidq, addr,
1917 tidq->period_instructions);
1921 tidq->period_instructions = 0;
2025 struct cs_etm_traceid_queue *tidq, u64 magic)
2027 u8 trace_chan_id = tidq->trace_chan_id;
2028 struct cs_etm_packet *packet = tidq->packet;
2029 struct cs_etm_packet *prev_packet = tidq->prev_packet;
2050 static bool cs_etm__is_async_exception(struct cs_etm_traceid_queue *tidq,
2053 struct cs_etm_packet *packet = tidq->packet;
2077 struct cs_etm_traceid_queue *tidq,
2080 u8 trace_chan_id = tidq->trace_chan_id;
2081 struct cs_etm_packet *packet = tidq->packet;
2082 struct cs_etm_packet *prev_packet = tidq->prev_packet;
2126 struct cs_etm_traceid_queue *tidq)
2128 struct cs_etm_packet *packet = tidq->packet;
2129 struct cs_etm_packet *prev_packet = tidq->prev_packet;
2130 u8 trace_chan_id = tidq->trace_chan_id;
2233 if (cs_etm__is_syscall(etmq, tidq, magic))
2241 else if (cs_etm__is_async_exception(tidq, magic))
2250 else if (cs_etm__is_sync_exception(etmq, tidq, magic))
2333 struct cs_etm_traceid_queue *tidq)
2338 packet_queue = &tidq->packet_queue;
2343 tidq->packet);
2358 ret = cs_etm__set_sample_flags(etmq, tidq);
2362 switch (tidq->packet->sample_type) {
2369 cs_etm__sample(etmq, tidq);
2378 cs_etm__exception(tidq);
2385 cs_etm__flush(etmq, tidq);
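
The cs_etm__process_traceid_queue() matches (source lines 2333-2385) show the per-packet dispatch: each packet popped from the queue is classified by sample_type, with range packets feeding cs_etm__sample(), exceptions marking the previous range as ending in a taken branch, and discontinuities flushing whatever is pending. A condensed sketch of that dispatch shape, with handler bodies reduced to comments:

    /* Illustrative packet kinds mirroring the sample_type dispatch above. */
    enum pkt_type { PKT_RANGE, PKT_EXCEPTION, PKT_DISCONTINUITY, PKT_EMPTY };

    struct packet { enum pkt_type type; };

    static void handle_range(void)         { /* synthesise instruction/branch samples       */ }
    static void handle_exception(void)     { /* mark previous range as ending in a branch   */ }
    static void handle_discontinuity(void) { /* flush pending samples, reset the period     */ }

    static void process_packet(const struct packet *pkt)
    {
            switch (pkt->type) {
            case PKT_RANGE:
                    handle_range();
                    break;
            case PKT_EXCEPTION:
                    handle_exception();
                    break;
            case PKT_DISCONTINUITY:
                    handle_discontinuity();
                    break;
            case PKT_EMPTY:
            default:
                    break;
            }
    }
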
2406 struct cs_etm_traceid_queue *tidq;
2411 tidq = etmq->traceid_queues[idx];
2414 cs_etm__process_traceid_queue(etmq, tidq);
2420 cs_etm__flush(etmq, tidq);
2427 struct cs_etm_traceid_queue *tidq;
2429 tidq = cs_etm__etmq_get_traceid_queue(etmq, CS_ETM_PER_THREAD_TRACEID);
2430 if (!tidq)
2450 err = cs_etm__process_traceid_queue(etmq, tidq);
2456 err = cs_etm__end_block(etmq, tidq);
2465 struct cs_etm_traceid_queue *tidq;
2489 tidq = etmq->traceid_queues[idx];
2490 cs_etm__process_traceid_queue(etmq, tidq);
2496 tidq = etmq->traceid_queues[idx];
2498 err = cs_etm__end_block(etmq, tidq);
2516 struct cs_etm_traceid_queue *tidq;
2522 tidq = cs_etm__etmq_get_traceid_queue(
2525 if (!tidq)
2528 if (tid == -1 || thread__tid(tidq->thread) == tid)
2545 struct cs_etm_traceid_queue *tidq;
2578 tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
2579 if (!tidq) {
2593 ret = cs_etm__process_traceid_queue(etmq, tidq);