xref: /kernel/linux/linux-6.6/tools/perf/util/thread.c (revision 62306a36)
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include "dso.h"
#include "session.h"
#include "thread.h"
#include "thread-stack.h"
#include "debug.h"
#include "namespaces.h"
#include "comm.h"
#include "map.h"
#include "symbol.h"
#include "unwind.h"
#include "callchain.h"

#include <api/fs/fs.h>

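/*
 * A non-leader thread shares the maps of its thread-group leader: when
 * pid != tid the leader is looked up (or created) and its maps are
 * reference-counted and reused. A leader (pid == tid) or a thread with an
 * unknown pid gets a freshly allocated struct maps.
 */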
int thread__init_maps(struct thread *thread, struct machine *machine)
{
	pid_t pid = thread__pid(thread);

	if (pid == thread__tid(thread) || pid == -1) {
		thread__set_maps(thread, maps__new(machine));
	} else {
		struct thread *leader = __machine__findnew_thread(machine, pid, pid);

		if (leader) {
			thread__set_maps(thread, maps__get(thread__maps(leader)));
			thread__put(leader);
		}
	}

	return thread__maps(thread) ? 0 : -1;
}

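/*
 * Constructor. A new thread starts with a placeholder ":<tid>" comm and a
 * reference count of one. Illustrative sketch (not code from this file):
 * callers normally go through the machine layer rather than calling
 * thread__new() directly, and drop their reference when done:
 *
 *	struct thread *t = machine__findnew_thread(machine, pid, tid);
 *
 *	if (t != NULL) {
 *		... use t: resolve maps, comm, symbols ...
 *		thread__put(t);
 *	}
 */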
struct thread *thread__new(pid_t pid, pid_t tid)
{
	char *comm_str;
	struct comm *comm;
	RC_STRUCT(thread) *_thread = zalloc(sizeof(*_thread));
	struct thread *thread;

	if (ADD_RC_CHK(thread, _thread) != NULL) {
		thread__set_pid(thread, pid);
		thread__set_tid(thread, tid);
		thread__set_ppid(thread, -1);
		thread__set_cpu(thread, -1);
		thread__set_guest_cpu(thread, -1);
		thread__set_lbr_stitch_enable(thread, false);
		INIT_LIST_HEAD(thread__namespaces_list(thread));
		INIT_LIST_HEAD(thread__comm_list(thread));
		init_rwsem(thread__namespaces_lock(thread));
		init_rwsem(thread__comm_lock(thread));

		comm_str = malloc(32);
		if (!comm_str)
			goto err_thread;

		snprintf(comm_str, 32, ":%d", tid);
		comm = comm__new(comm_str, 0, false);
		free(comm_str);
		if (!comm)
			goto err_thread;

		list_add(&comm->list, thread__comm_list(thread));
		refcount_set(thread__refcnt(thread), 1);
		/* Thread holds first ref to nsdata. */
		RC_CHK_ACCESS(thread)->nsinfo = nsinfo__new(pid);
		srccode_state_init(thread__srccode_state(thread));
	}

	return thread;

err_thread:
	free(thread);
	return NULL;
}

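/*
 * Tools can attach per-thread data with thread__set_priv() and register a
 * single destructor so thread__delete() releases it. Illustrative sketch
 * (the helper name below is made up, not part of perf):
 *
 *	static void example_thread_priv__free(void *priv)
 *	{
 *		free(priv);
 *	}
 *	...
 *	thread__set_priv_destructor(example_thread_priv__free);
 */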
static void (*thread__priv_destructor)(void *priv);

void thread__set_priv_destructor(void (*destructor)(void *priv))
{
	assert(thread__priv_destructor == NULL);

	thread__priv_destructor = destructor;
}

void thread__delete(struct thread *thread)
{
	struct namespaces *namespaces, *tmp_namespaces;
	struct comm *comm, *tmp_comm;

	thread_stack__free(thread);

	if (thread__maps(thread)) {
		maps__put(thread__maps(thread));
		thread__set_maps(thread, NULL);
	}
	down_write(thread__namespaces_lock(thread));
	list_for_each_entry_safe(namespaces, tmp_namespaces,
				 thread__namespaces_list(thread), list) {
		list_del_init(&namespaces->list);
		namespaces__free(namespaces);
	}
	up_write(thread__namespaces_lock(thread));

	down_write(thread__comm_lock(thread));
	list_for_each_entry_safe(comm, tmp_comm, thread__comm_list(thread), list) {
		list_del_init(&comm->list);
		comm__free(comm);
	}
	up_write(thread__comm_lock(thread));

	nsinfo__zput(RC_CHK_ACCESS(thread)->nsinfo);
	srccode_state_free(thread__srccode_state(thread));

	exit_rwsem(thread__namespaces_lock(thread));
	exit_rwsem(thread__comm_lock(thread));
	thread__free_stitch_list(thread);

	if (thread__priv_destructor)
		thread__priv_destructor(thread__priv(thread));

	RC_CHK_FREE(thread);
}

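/*
 * Reference counting: every thread__get() must be paired with a
 * thread__put(). The RC_CHK_* macros are perf's reference-count checking
 * helpers; when checking is disabled they reduce to simple pointer
 * operations, leaving only the refcount_t manipulation.
 */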
struct thread *thread__get(struct thread *thread)
{
	struct thread *result;

	if (RC_CHK_GET(result, thread))
		refcount_inc(thread__refcnt(thread));

	return result;
}

void thread__put(struct thread *thread)
{
	if (thread && refcount_dec_and_test(thread__refcnt(thread)))
		thread__delete(thread);
	else
		RC_CHK_PUT(thread);
}

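/*
 * Namespaces events are list_add()'ed at the head of namespaces_list, so
 * the first entry is always the most recent set of namespaces seen for
 * this thread.
 */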
static struct namespaces *__thread__namespaces(struct thread *thread)
{
	if (list_empty(thread__namespaces_list(thread)))
		return NULL;

	return list_first_entry(thread__namespaces_list(thread), struct namespaces, list);
}

struct namespaces *thread__namespaces(struct thread *thread)
{
	struct namespaces *ns;

	down_read(thread__namespaces_lock(thread));
	ns = __thread__namespaces(thread);
	up_read(thread__namespaces_lock(thread));

	return ns;
}

static int __thread__set_namespaces(struct thread *thread, u64 timestamp,
				    struct perf_record_namespaces *event)
{
	struct namespaces *new, *curr = __thread__namespaces(thread);

	new = namespaces__new(event);
	if (!new)
		return -ENOMEM;

	list_add(&new->list, thread__namespaces_list(thread));

	if (timestamp && curr) {
		/*
		 * A setns syscall must have changed some or all of this
		 * thread's namespaces. Record the end time of the namespaces
		 * that were previously in use.
		 */
		curr = list_next_entry(new, list);
		curr->end_time = timestamp;
	}

	return 0;
}

int thread__set_namespaces(struct thread *thread, u64 timestamp,
			   struct perf_record_namespaces *event)
{
	int ret;

	down_write(thread__namespaces_lock(thread));
	ret = __thread__set_namespaces(thread, timestamp, event);
	up_write(thread__namespaces_lock(thread));
	return ret;
}

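/*
 * Like the namespaces list, comm entries are list_add()'ed at the head of
 * comm_list, so thread__comm() returns the most recently set comm.
 */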
struct comm *thread__comm(struct thread *thread)
{
	if (list_empty(thread__comm_list(thread)))
		return NULL;

	return list_first_entry(thread__comm_list(thread), struct comm, list);
}

struct comm *thread__exec_comm(struct thread *thread)
{
	struct comm *comm, *last = NULL, *second_last = NULL;

	list_for_each_entry(comm, thread__comm_list(thread), list) {
		if (comm->exec)
			return comm;
		second_last = last;
		last = comm;
	}

	/*
	 * 'last' with no start time might be the parent's comm of a synthesized
	 * thread (created by processing a synthesized fork event). For a main
	 * thread, that is very probably wrong. Prefer a later comm to avoid
	 * that case.
	 */
	if (second_last && !last->start && thread__pid(thread) == thread__tid(thread))
		return second_last;

	return last;
}

static int ____thread__set_comm(struct thread *thread, const char *str,
				u64 timestamp, bool exec)
{
	struct comm *new, *curr = thread__comm(thread);

	/* Override the default :tid entry */
	if (!thread__comm_set(thread)) {
		int err = comm__override(curr, str, timestamp, exec);

		if (err)
			return err;
	} else {
		new = comm__new(str, timestamp, exec);
		if (!new)
			return -ENOMEM;
		list_add(&new->list, thread__comm_list(thread));

		if (exec)
			unwind__flush_access(thread__maps(thread));
	}

	thread__set_comm_set(thread, true);

	return 0;
}

int __thread__set_comm(struct thread *thread, const char *str, u64 timestamp,
		       bool exec)
{
	int ret;

	down_write(thread__comm_lock(thread));
	ret = ____thread__set_comm(thread, str, timestamp, exec);
	up_write(thread__comm_lock(thread));
	return ret;
}

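/*
 * Read the current comm from procfs. The path is relative to the /proc
 * mount point resolved by procfs__read_str() (from api/fs/fs.h), i.e.
 * "/proc/<pid>/task/<tid>/comm"; the trailing newline is replaced with a
 * NUL terminator before the comm is recorded.
 */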
int thread__set_comm_from_proc(struct thread *thread)
{
	char path[64];
	char *comm = NULL;
	size_t sz;
	int err = -1;

	if (!(snprintf(path, sizeof(path), "%d/task/%d/comm",
		       thread__pid(thread), thread__tid(thread)) >= (int)sizeof(path)) &&
	    procfs__read_str(path, &comm, &sz) == 0) {
		comm[sz - 1] = '\0';
		err = thread__set_comm(thread, comm, 0);
	}

	return err;
}

static const char *__thread__comm_str(struct thread *thread)
{
	const struct comm *comm = thread__comm(thread);

	if (!comm)
		return NULL;

	return comm__str(comm);
}

const char *thread__comm_str(struct thread *thread)
{
	const char *str;

	down_read(thread__comm_lock(thread));
	str = __thread__comm_str(thread);
	up_read(thread__comm_lock(thread));

	return str;
}

static int __thread__comm_len(struct thread *thread, const char *comm)
{
	if (!comm)
		return 0;
	thread__set_comm_len(thread, strlen(comm));

	return thread__var_comm_len(thread);
}

/* CHECKME: it should probably return the max comm len from its comm list instead */
int thread__comm_len(struct thread *thread)
{
	int comm_len = thread__var_comm_len(thread);

	if (!comm_len) {
		const char *comm;

		down_read(thread__comm_lock(thread));
		comm = __thread__comm_str(thread);
		comm_len = __thread__comm_len(thread, comm);
		up_read(thread__comm_lock(thread));
	}

	return comm_len;
}

size_t thread__fprintf(struct thread *thread, FILE *fp)
{
	return fprintf(fp, "Thread %d %s\n", thread__tid(thread), thread__comm_str(thread)) +
	       maps__fprintf(thread__maps(thread), fp);
}

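/*
 * Insert a new map into the thread's address space: set up DWARF unwind
 * access for it, clip or remove any existing maps that overlap the new
 * range, then add it to the maps tree.
 */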
int thread__insert_map(struct thread *thread, struct map *map)
{
	int ret;

	ret = unwind__prepare_access(thread__maps(thread), map, NULL);
	if (ret)
		return ret;

	maps__fixup_overlappings(thread__maps(thread), map, stderr);
	return maps__insert(thread__maps(thread), map);
}

static int __thread__prepare_access(struct thread *thread)
{
	bool initialized = false;
	int err = 0;
	struct maps *maps = thread__maps(thread);
	struct map_rb_node *rb_node;

	down_read(maps__lock(maps));

	maps__for_each_entry(maps, rb_node) {
		err = unwind__prepare_access(thread__maps(thread), rb_node->map, &initialized);
		if (err || initialized)
			break;
	}

	up_read(maps__lock(maps));

	return err;
}

static int thread__prepare_access(struct thread *thread)
{
	int err = 0;

	if (dwarf_callchain_users)
		err = __thread__prepare_access(thread);

	return err;
}

static int thread__clone_maps(struct thread *thread, struct thread *parent, bool do_maps_clone)
{
	/* This is a new thread, so share the process's map groups. */
	if (thread__pid(thread) == thread__pid(parent))
		return thread__prepare_access(thread);

	if (thread__maps(thread) == thread__maps(parent)) {
		pr_debug("broken map groups on thread %d/%d parent %d/%d\n",
			 thread__pid(thread), thread__tid(thread),
			 thread__pid(parent), thread__tid(parent));
		return 0;
	}
	/* This is a new process, so copy the parent's maps. */
	return do_maps_clone ? maps__clone(thread, thread__maps(parent)) : 0;
}

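/*
 * Handle a fork event: the child inherits the parent's current comm
 * (timestamped with the fork event), records the parent's tid as its ppid,
 * and either shares the parent's maps (new thread in the same process) or
 * clones them (new process), as decided by thread__clone_maps().
 */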
int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp, bool do_maps_clone)
{
	if (thread__comm_set(parent)) {
		const char *comm = thread__comm_str(parent);
		int err;

		if (!comm)
			return -ENOMEM;
		err = thread__set_comm(thread, comm, timestamp);
		if (err)
			return err;
	}

	thread__set_ppid(thread, thread__tid(parent));
	return thread__clone_maps(thread, parent, do_maps_clone);
}

void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
					struct addr_location *al)
{
	size_t i;
	const u8 cpumodes[] = {
		PERF_RECORD_MISC_USER,
		PERF_RECORD_MISC_KERNEL,
		PERF_RECORD_MISC_GUEST_USER,
		PERF_RECORD_MISC_GUEST_KERNEL
	};

	for (i = 0; i < ARRAY_SIZE(cpumodes); i++) {
		thread__find_symbol(thread, cpumodes[i], addr, al);
		if (al->map)
			break;
	}
}

struct thread *thread__main_thread(struct machine *machine, struct thread *thread)
{
	if (thread__pid(thread) == thread__tid(thread))
		return thread__get(thread);

	if (thread__pid(thread) == -1)
		return NULL;

	return machine__find_thread(machine, thread__pid(thread), thread__pid(thread));
}

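/*
 * Copy up to 'len' bytes of the object mapped at 'ip' into 'buf': resolve
 * ip to a map and its backing DSO, convert it to a DSO-relative offset,
 * and read the bytes from the DSO's data. Returns the number of bytes
 * read, or -1 on failure.
 */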
int thread__memcpy(struct thread *thread, struct machine *machine,
		   void *buf, u64 ip, int len, bool *is64bit)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	struct addr_location al;
	struct dso *dso;
	long offset;

	if (machine__kernel_ip(machine, ip))
		cpumode = PERF_RECORD_MISC_KERNEL;

	addr_location__init(&al);
	if (!thread__find_map(thread, cpumode, ip, &al)) {
		addr_location__exit(&al);
		return -1;
	}

	dso = map__dso(al.map);

	if (!dso || dso->data.status == DSO_DATA_STATUS_ERROR || map__load(al.map) < 0) {
		addr_location__exit(&al);
		return -1;
	}

	offset = map__map_ip(al.map, ip);
	if (is64bit)
		*is64bit = dso->is_64_bit;

	addr_location__exit(&al);

	return dso__data_read_offset(dso, machine, offset, buf, len);
}

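/*
 * Release the LBR stitching state used to stitch call chains across
 * samples: both the in-use and free lists of stitch nodes, the saved LBR
 * cursor entries, and the lbr_stitch container itself.
 */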
void thread__free_stitch_list(struct thread *thread)
{
	struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
	struct stitch_list *pos, *tmp;

	if (!lbr_stitch)
		return;

	list_for_each_entry_safe(pos, tmp, &lbr_stitch->lists, node) {
		list_del_init(&pos->node);
		free(pos);
	}

	list_for_each_entry_safe(pos, tmp, &lbr_stitch->free_lists, node) {
		list_del_init(&pos->node);
		free(pos);
	}

	zfree(&lbr_stitch->prev_lbr_cursor);
	free(thread__lbr_stitch(thread));
	thread__set_lbr_stitch(thread, NULL);
}