Lines Matching defs:thread

25 #include "thread.h"
47 struct thread *th, bool lock);
79 return to_find - (int)thread__tid(rb_entry(nd, struct thread_rb_node, rb_node)->thread);
82 static struct thread_rb_node *thread_rb_node__find(const struct thread *th,
104 static void thread__set_guest_comm(struct thread *thread, pid_t pid)
109 thread__set_comm(thread, comm, 0);
145 struct thread *thread = machine__findnew_thread(machine, -1,
148 if (thread == NULL)
151 thread__set_guest_comm(thread, pid);
152 thread__put(thread);
236 __machine__remove_thread(machine, trb, trb->thread, false);
418 * To support that, copy the host thread's maps to the guest thread's maps.
421 * thread's maps have been set up.
423 * This function returns the guest thread. Apart from keeping the data
424 * structures sane, using a thread belonging to the guest machine, instead
425 * of the host thread, allows it to have its own comm (refer
428 static struct thread *findnew_guest_code(struct machine *machine,
432 struct thread *host_thread;
433 struct thread *thread;
439 thread = machine__findnew_thread(machine, -1, pid);
440 if (!thread)
444 if (maps__nr_maps(thread__maps(thread)))
445 return thread;
451 thread__set_guest_comm(thread, pid);
457 err = maps__clone(thread, thread__maps(host_thread));
462 return thread;
465 thread__zput(thread);
469 struct thread *machines__findnew_guest_code(struct machines *machines, pid_t pid)
477 struct thread *machine__findnew_guest_code(struct machine *machine, pid_t pid)
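The block comment at 418-425 carries the design rationale here: guest code executes inside a host process, so the host thread's maps are copied onto a thread owned by the guest machine, which can then keep its own comm. A hedged reconstruction of that flow from the fragments at 428-465 follows; lines the search did not match are guessed, the host_machine parameter and the machine__find_thread() lookup of the host thread are assumptions, and error reporting is abbreviated:

	static struct thread *findnew_guest_code(struct machine *machine,
						 struct machine *host_machine,
						 pid_t pid)
	{
		struct thread *host_thread;
		struct thread *thread;
		int err;

		thread = machine__findnew_thread(machine, -1, pid);
		if (!thread)
			return NULL;

		/* Maps already cloned by an earlier call: reuse them. */
		if (maps__nr_maps(thread__maps(thread)))
			return thread;

		/* Assumed lookup of the host-side thread for this pid. */
		host_thread = machine__find_thread(host_machine, -1, pid);
		if (!host_thread)
			goto out_err;

		thread__set_guest_comm(thread, pid);

		/*
		 * Copy the host thread's maps to the guest thread's maps so
		 * that guest code can be resolved against them.
		 */
		err = maps__clone(thread, thread__maps(host_thread));
		thread__put(host_thread);
		if (err < 0)
			goto out_err;

		return thread;

	out_err:
		thread__zput(thread);
		return NULL;
	}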
518 struct thread *th, pid_t pid)
520 struct thread *leader;
546 * tid. Consequently there never should be any maps on a thread
550 pr_err("Discarding thread maps for %d:%d\n",
569 static struct thread*
573 struct thread *th;
588 static struct thread*
592 struct thread *th = NULL;
601 __threads__set_last_match(struct threads *threads, struct thread *th)
608 threads__set_last_match(struct threads *threads, struct thread *th)
615 * Caller must eventually drop the thread->refcnt reference returned by a
616 * successful lookup or a newly inserted thread.
618 static struct thread *____machine__findnew_thread(struct machine *machine,
625 struct thread *th;
635 th = rb_entry(parent, struct thread_rb_node, rb_node)->thread;
663 nd->thread = th;
671 * thread__init_maps to find the thread leader and that would screw
675 pr_err("Thread init failed thread %d\n", pid);
691 struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
696 struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
700 struct thread *th;
708 struct thread *machine__find_thread(struct machine *machine, pid_t pid,
712 struct thread *th;
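The comment at 615-616 states the ownership rule for all of these lookups: a successful machine__findnew_thread() or machine__find_thread() returns a reference that the caller must eventually drop. A minimal usage sketch of that pattern, the same shape as the callers at 145-152 and 750-768:

	struct thread *th = machine__findnew_thread(machine, pid, tid);

	if (th != NULL) {
		/* ... use th while the reference is held ... */
		thread__put(th);	/* drop the lookup's reference */
	}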
722 * So here a single thread is created for that, but actually there is a separate
723 * idle task per cpu, so there should be one 'struct thread' per cpu, but there
727 struct thread *machine__idle_thread(struct machine *machine)
729 struct thread *thread = machine__findnew_thread(machine, 0, 0);
731 if (!thread || thread__set_comm(thread, "swapper", 0) ||
732 thread__set_namespaces(thread, 0, NULL))
735 return thread;
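The comment at 722-727 records a known modeling wrinkle: every cpu's idle task shares pid == tid == 0, so perf keeps a single 'struct thread' for all of them. Reconstructed from the fragments at 727-735, the idle thread is materialized like this (error reporting elided):

	/* One swapper thread stands in for every cpu's idle task. */
	struct thread *thread = machine__findnew_thread(machine, 0, 0);

	if (!thread || thread__set_comm(thread, "swapper", 0) ||
	    thread__set_namespaces(thread, 0, NULL))
		return NULL;	/* error path abbreviated */

	return thread;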
739 struct thread *thread)
742 return thread__exec_comm(thread);
744 return thread__comm(thread);
750 struct thread *thread = machine__findnew_thread(machine,
762 if (thread == NULL ||
763 __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
768 thread__put(thread);
777 struct thread *thread = machine__findnew_thread(machine,
793 if (thread == NULL ||
794 thread__set_namespaces(thread, sample->time, &event->namespaces)) {
799 thread__put(thread);
1139 struct thread *pos = rb_entry(nd, struct thread_rb_node, rb_node)->thread;
1930 struct thread *thread;
1964 thread = machine__findnew_thread(machine, event->mmap2.pid,
1966 if (thread == NULL)
1973 event->mmap2.filename, thread);
1978 ret = thread__insert_map(thread, map);
1982 thread__put(thread);
1989 thread__put(thread);
1998 struct thread *thread;
2021 thread = machine__findnew_thread(machine, event->mmap.pid,
2023 if (thread == NULL)
2031 NULL, prot, 0, NULL, event->mmap.filename, thread);
2036 ret = thread__insert_map(thread, map);
2040 thread__put(thread);
2047 thread__put(thread);
2054 struct thread *th, bool lock)
2069 thread__put(nd->thread);
2080 void machine__remove_thread(struct machine *machine, struct thread *th)
2088 struct thread *thread = machine__find_thread(machine,
2091 struct thread *parent = machine__findnew_thread(machine,
2101 * There may be an existing thread that is not actually the parent,
2103 * (fork) event that would have removed the thread was lost. Assume the
2107 dump_printf("removing erroneous parent thread %d/%d\n",
2115 /* if a thread currently exists for the thread id remove it */
2116 if (thread != NULL) {
2117 machine__remove_thread(machine, thread);
2118 thread__put(thread);
2121 thread = machine__findnew_thread(machine, event->fork.pid,
2124 * When synthesizing FORK events, we are trying to create thread
2140 if (thread == NULL || parent == NULL ||
2141 thread__fork(thread, parent, sample->time, do_maps_clone) < 0) {
2145 thread__put(thread);
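The comments at 2101-2107 and 2115-2124 hold the interesting reasoning in the fork path: a thread already registered under the child's tid may be stale because the exit (fork) event that would have removed it was lost, so it is removed before the child is created fresh. The order of operations, reconstructed from the fragments at 2088-2145, with the argument lists on unmatched lines guessed and error reporting plus the erroneous-parent check abbreviated:

	struct thread *thread = machine__find_thread(machine, event->fork.pid,
						     event->fork.tid);
	struct thread *parent = machine__findnew_thread(machine, event->fork.ppid,
							event->fork.ptid);

	/* If a thread currently exists for the tid, it is stale: remove it. */
	if (thread != NULL) {
		machine__remove_thread(machine, thread);
		thread__put(thread);
	}
	thread = machine__findnew_thread(machine, event->fork.pid,
					 event->fork.tid);
	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent, sample->time, do_maps_clone) < 0) {
		/* error reporting elided */
	}
	thread__put(thread);
	thread__put(parent);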
2154 struct thread *thread = machine__find_thread(machine,
2161 if (thread != NULL)
2162 thread__put(thread);
2221 static void ip__resolve_ams(struct thread *thread,
2235 thread__find_cpumode_addr_location(thread, ip, &al);
2248 static void ip__resolve_data(struct thread *thread,
2256 thread__find_symbol(thread, m, addr, &al);
2277 ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
2278 ip__resolve_data(al->thread, al->cpumode, &mi->daddr,
2314 static int add_callchain_ip(struct thread *thread,
2336 thread__find_cpumode_addr_location(thread, ip, &al);
2362 thread__find_symbol(thread, *cpumode, ip, &al);
2412 ip__resolve_ams(al->thread, &bi[i].to, entries[i].to);
2413 ip__resolve_ams(al->thread, &bi[i].from, entries[i].from);
2483 static int lbr_callchain_add_kernel_ip(struct thread *thread,
2497 err = add_callchain_ip(thread, cursor, parent,
2507 err = add_callchain_ip(thread, cursor, parent,
2517 static void save_lbr_cursor_node(struct thread *thread,
2521 struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
2542 static int lbr_callchain_add_lbr_ip(struct thread *thread,
2563 if (thread__lbr_stitch(thread)) {
2578 err = add_callchain_ip(thread, cursor, parent,
2591 if (thread__lbr_stitch(thread) && (cursor->pos != cursor->nr)) {
2603 err = add_callchain_ip(thread, cursor, parent,
2609 save_lbr_cursor_node(thread, cursor, i);
2618 err = add_callchain_ip(thread, cursor, parent,
2624 save_lbr_cursor_node(thread, cursor, i);
2632 err = add_callchain_ip(thread, cursor, parent,
2643 static int lbr_callchain_add_stitched_lbr_ip(struct thread *thread,
2646 struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
2668 static struct stitch_list *get_stitch_node(struct thread *thread)
2670 struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
2684 static bool has_stitched_lbr(struct thread *thread,
2694 struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
2740 stitch_node = get_stitch_node(thread);
2756 static bool alloc_lbr_stitch(struct thread *thread, unsigned int max_lbr)
2758 if (thread__lbr_stitch(thread))
2761 thread__set_lbr_stitch(thread, zalloc(sizeof(struct lbr_stitch)));
2762 if (!thread__lbr_stitch(thread))
2765 thread__lbr_stitch(thread)->prev_lbr_cursor =
2767 if (!thread__lbr_stitch(thread)->prev_lbr_cursor)
2770 INIT_LIST_HEAD(&thread__lbr_stitch(thread)->lists);
2771 INIT_LIST_HEAD(&thread__lbr_stitch(thread)->free_lists);
2776 free(thread__lbr_stitch(thread));
2777 thread__set_lbr_stitch(thread, NULL);
2780 thread__set_lbr_stitch_enable(thread, false);
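alloc_lbr_stitch() (fragments at 2756-2780) follows a lazy allocate-once pattern: the per-thread LBR stitch state is created on first use and torn down piecewise if any step fails, with stitching disabled for the thread on failure. A hedged reconstruction; the allocation on line 2766 is not shown by the search, so the calloc sizing is an assumption:

	static bool alloc_lbr_stitch(struct thread *thread, unsigned int max_lbr)
	{
		if (thread__lbr_stitch(thread))
			return true;	/* already allocated */

		thread__set_lbr_stitch(thread, zalloc(sizeof(struct lbr_stitch)));
		if (!thread__lbr_stitch(thread))
			goto err;

		/* Assumed sizing: one saved cursor node per possible LBR entry. */
		thread__lbr_stitch(thread)->prev_lbr_cursor =
			calloc(max_lbr + 1, sizeof(struct callchain_cursor_node));
		if (!thread__lbr_stitch(thread)->prev_lbr_cursor)
			goto free_lbr_stitch;

		INIT_LIST_HEAD(&thread__lbr_stitch(thread)->lists);
		INIT_LIST_HEAD(&thread__lbr_stitch(thread)->free_lists);

		return true;

	free_lbr_stitch:
		free(thread__lbr_stitch(thread));
		thread__set_lbr_stitch(thread, NULL);
	err:
		/* On failure, stop trying to stitch for this thread. */
		thread__set_lbr_stitch_enable(thread, false);
		return false;
	}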
2791 static int resolve_lbr_callchain_sample(struct thread *thread,
2816 if (thread__lbr_stitch_enable(thread) && !sample->no_hw_idx &&
2817 (max_lbr > 0) && alloc_lbr_stitch(thread, max_lbr)) {
2818 lbr_stitch = thread__lbr_stitch(thread);
2820 stitched_lbr = has_stitched_lbr(thread, sample,
2833 err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
2839 err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
2845 err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
2852 err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
2856 err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
2862 err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
2874 static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread,
2886 err = add_callchain_ip(thread, cursor, parent,
2896 struct thread *thread, int usr_idx)
2898 if (machine__normalized_is(maps__machine(thread__maps(thread)), "arm64"))
2899 return get_leaf_frame_caller_aarch64(sample, thread, usr_idx);
2904 static int thread__resolve_callchain_sample(struct thread *thread,
2928 err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
2939 skip_idx = arch_skip_callchain_idx(thread, chain);
2991 err = add_callchain_ip(thread, cursor, parent,
2998 err = add_callchain_ip(thread, cursor, parent, root_al,
3016 err = find_prev_cpumode(chain, thread, cursor, parent, root_al,
3038 err = find_prev_cpumode(chain, thread, cursor, parent,
3056 leaf_frame_caller = get_leaf_frame_caller(sample, thread, usr_idx);
3065 err = add_callchain_ip(thread, cursor, parent,
3073 err = add_callchain_ip(thread, cursor, parent,
3152 static int thread__resolve_callchain_unwind(struct thread *thread,
3169 thread, sample, max_stack, false);
3172 int thread__resolve_callchain(struct thread *thread,
3188 ret = thread__resolve_callchain_sample(thread, cursor,
3194 ret = thread__resolve_callchain_unwind(thread, cursor,
3198 ret = thread__resolve_callchain_unwind(thread, cursor,
3203 ret = thread__resolve_callchain_sample(thread, cursor,
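thread__resolve_callchain() (fragments at 3172-3203) runs the two resolvers in one of two orders, which is why each call appears twice above. A hedged sketch of the dispatch; the ORDER_CALLEE condition is an inference from the duplicated call pairs, and argument lists are abbreviated:

	if (callchain_param.order == ORDER_CALLEE) {	/* assumed condition */
		/* Sampled chain (FP/LBR) first, then the unwound part. */
		ret = thread__resolve_callchain_sample(thread, cursor /* , ... */);
		if (ret)
			return ret;
		ret = thread__resolve_callchain_unwind(thread, cursor /* , ... */);
	} else {
		/* Caller order: unwind first, then the sampled chain. */
		ret = thread__resolve_callchain_unwind(thread, cursor /* , ... */);
		if (ret)
			return ret;
		ret = thread__resolve_callchain_sample(thread, cursor /* , ... */);
	}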
3213 int (*fn)(struct thread *thread, void *p),
3227 rc = fn(trb->thread, priv);
3236 int (*fn)(struct thread *thread, void *p),
3267 struct thread *thread;
3281 thread = machine__findnew_thread(machine, pid, tid);
3282 if (!thread)
3285 thread__set_cpu(thread, cpu);
3286 thread__put(thread);