xref: /kernel/linux/linux-5.10/tools/perf/bench/numa.c (revision 8c2ecf20)
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * numa.c
4 *
5 * numa: Simulate a NUMA-sensitive workload and measure its NUMA performance
6 */
7
8#include <inttypes.h>
9/* For the CLR_() macros */
10#include <pthread.h>
11
12#include <subcmd/parse-options.h>
13#include "../util/cloexec.h"
14
15#include "bench.h"
16
17#include <errno.h>
18#include <sched.h>
19#include <stdio.h>
20#include <assert.h>
21#include <malloc.h>
22#include <signal.h>
23#include <stdlib.h>
24#include <string.h>
25#include <unistd.h>
26#include <sys/mman.h>
27#include <sys/time.h>
28#include <sys/resource.h>
29#include <sys/wait.h>
30#include <sys/prctl.h>
31#include <sys/types.h>
32#include <linux/kernel.h>
33#include <linux/time64.h>
34#include <linux/numa.h>
35#include <linux/zalloc.h>
36
37#include <numa.h>
38#include <numaif.h>
39
40#ifndef RUSAGE_THREAD
41# define RUSAGE_THREAD 1
42#endif
43
44/*
45 * Regular printout to the terminal, suppressed if -q is specified:
46 */
47#define tprintf(x...) do { if (g && g->p.show_details >= 0) printf(x); } while (0)
48
49/*
50 * Debug printf:
51 */
52#undef dprintf
53#define dprintf(x...) do { if (g && g->p.show_details >= 1) printf(x); } while (0)
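
/*
 * Verbosity sketch (derived from the option handling further down): with -q
 * and no -d, init() drops show_details to -1, which silences tprintf(); each
 * -d raises show_details by one, so dprintf() output needs at least one -d.
 */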
54
55struct thread_data {
56	int			curr_cpu;
57	cpu_set_t		bind_cpumask;
58	int			bind_node;
59	u8			*process_data;
60	int			process_nr;
61	int			thread_nr;
62	int			task_nr;
63	unsigned int		loops_done;
64	u64			val;
65	u64			runtime_ns;
66	u64			system_time_ns;
67	u64			user_time_ns;
68	double			speed_gbs;
69	pthread_mutex_t		*process_lock;
70};
71
72/* Parameters set by options: */
73
74struct params {
75	/* Startup synchronization: */
76	bool			serialize_startup;
77
78	/* Task hierarchy: */
79	int			nr_proc;
80	int			nr_threads;
81
82	/* Working set sizes: */
83	const char		*mb_global_str;
84	const char		*mb_proc_str;
85	const char		*mb_proc_locked_str;
86	const char		*mb_thread_str;
87
88	double			mb_global;
89	double			mb_proc;
90	double			mb_proc_locked;
91	double			mb_thread;
92
93	/* Access patterns to the working set: */
94	bool			data_reads;
95	bool			data_writes;
96	bool			data_backwards;
97	bool			data_zero_memset;
98	bool			data_rand_walk;
99	u32			nr_loops;
100	u32			nr_secs;
101	u32			sleep_usecs;
102
103	/* Working set initialization: */
104	bool			init_zero;
105	bool			init_random;
106	bool			init_cpu0;
107
108	/* Misc options: */
109	int			show_details;
110	int			run_all;
111	int			thp;
112
113	long			bytes_global;
114	long			bytes_process;
115	long			bytes_process_locked;
116	long			bytes_thread;
117
118	int			nr_tasks;
119	bool			show_quiet;
120
121	bool			show_convergence;
122	bool			measure_convergence;
123
124	int			perturb_secs;
125	int			nr_cpus;
126	int			nr_nodes;
127
128	/* Affinity options -C and -N: */
129	char			*cpu_list_str;
130	char			*node_list_str;
131};
132
133
134/* Global, read-writable area, accessible to all processes and threads: */
135
136struct global_info {
137	u8			*data;
138
139	pthread_mutex_t		startup_mutex;
140	pthread_cond_t		startup_cond;
141	int			nr_tasks_started;
142
143	pthread_mutex_t		start_work_mutex;
144	pthread_cond_t		start_work_cond;
145	int			nr_tasks_working;
146	bool			start_work;
147
148	pthread_mutex_t		stop_work_mutex;
149	u64			bytes_done;
150
151	struct thread_data	*threads;
152
153	/* Convergence latency measurement: */
154	bool			all_converged;
155	bool			stop_work;
156
157	int			print_once;
158
159	struct params		p;
160};
161
162static struct global_info	*g = NULL;
163
164static int parse_cpus_opt(const struct option *opt, const char *arg, int unset);
165static int parse_nodes_opt(const struct option *opt, const char *arg, int unset);
166
167struct params p0;
168
169static const struct option options[] = {
170	OPT_INTEGER('p', "nr_proc"	, &p0.nr_proc,		"number of processes"),
171	OPT_INTEGER('t', "nr_threads"	, &p0.nr_threads,	"number of threads per process"),
172
173	OPT_STRING('G', "mb_global"	, &p0.mb_global_str,	"MB", "global  memory (MBs)"),
174	OPT_STRING('P', "mb_proc"	, &p0.mb_proc_str,	"MB", "process memory (MBs)"),
175	OPT_STRING('L', "mb_proc_locked", &p0.mb_proc_locked_str,"MB", "process serialized/locked memory access (MBs), <= process_memory"),
176	OPT_STRING('T', "mb_thread"	, &p0.mb_thread_str,	"MB", "thread  memory (MBs)"),
177
178	OPT_UINTEGER('l', "nr_loops"	, &p0.nr_loops,		"max number of loops to run (default: unlimited)"),
179	OPT_UINTEGER('s', "nr_secs"	, &p0.nr_secs,		"max number of seconds to run (default: 5 secs)"),
180	OPT_UINTEGER('u', "usleep"	, &p0.sleep_usecs,	"usecs to sleep per loop iteration"),
181
182	OPT_BOOLEAN('R', "data_reads"	, &p0.data_reads,	"access the data via reads (can be mixed with -W)"),
183	OPT_BOOLEAN('W', "data_writes"	, &p0.data_writes,	"access the data via writes (can be mixed with -R)"),
184	OPT_BOOLEAN('B', "data_backwards", &p0.data_backwards,	"access the data backwards as well"),
185	OPT_BOOLEAN('Z', "data_zero_memset", &p0.data_zero_memset,"access the data via glibc bzero only"),
186	OPT_BOOLEAN('r', "data_rand_walk", &p0.data_rand_walk,	"access the data with random (32bit LFSR) walk"),
187
188
189	OPT_BOOLEAN('z', "init_zero"	, &p0.init_zero,	"bzero the initial allocations"),
190	OPT_BOOLEAN('I', "init_random"	, &p0.init_random,	"randomize the contents of the initial allocations"),
191	OPT_BOOLEAN('0', "init_cpu0"	, &p0.init_cpu0,	"do the initial allocations on CPU#0"),
192	OPT_INTEGER('x', "perturb_secs", &p0.perturb_secs,	"perturb thread 0/0 every X secs, to test convergence stability"),
193
194	OPT_INCR   ('d', "show_details"	, &p0.show_details,	"Show details"),
195	OPT_INCR   ('a', "all"		, &p0.run_all,		"Run all tests in the suite"),
196	OPT_INTEGER('H', "thp"		, &p0.thp,		"MADV_NOHUGEPAGE < 0 < MADV_HUGEPAGE"),
197	OPT_BOOLEAN('c', "show_convergence", &p0.show_convergence, "show convergence details, "
198		    "convergence is reached when each process (all its threads) is running on a single NUMA node."),
199	OPT_BOOLEAN('m', "measure_convergence",	&p0.measure_convergence, "measure convergence latency"),
200	OPT_BOOLEAN('q', "quiet"	, &p0.show_quiet,	"quiet mode"),
201	OPT_BOOLEAN('S', "serialize-startup", &p0.serialize_startup,"serialize thread startup"),
202
203	/* Special option string parsing callbacks: */
204	OPT_CALLBACK('C', "cpus", NULL, "cpu[,cpu2,...cpuN]",
205			"bind the first N tasks to these specific cpus (the rest is unbound)",
206			parse_cpus_opt),
207	OPT_CALLBACK('M', "memnodes", NULL, "node[,node2,...nodeN]",
208			"bind the first N tasks to these specific memory nodes (the rest is unbound)",
209			parse_nodes_opt),
210	OPT_END()
211};
212
213static const char * const bench_numa_usage[] = {
214	"perf bench numa <options>",
215	NULL
216};
217
218static const char * const numa_usage[] = {
219	"perf bench numa mem [<options>]",
220	NULL
221};
222
223/*
224 * Return the number of NUMA nodes present.
225 */
226static int nr_numa_nodes(void)
227{
228	int i, nr_nodes = 0;
229
230	for (i = 0; i < g->p.nr_nodes; i++) {
231		if (numa_bitmask_isbitset(numa_nodes_ptr, i))
232			nr_nodes++;
233	}
234
235	return nr_nodes;
236}
237
238/*
239 * Check whether the given NUMA node is present.
240 */
241static int is_node_present(int node)
242{
243	return numa_bitmask_isbitset(numa_nodes_ptr, node);
244}
245
246/*
247 * Check whether the given NUMA node has any CPUs.
248 */
249static bool node_has_cpus(int node)
250{
251	struct bitmask *cpumask = numa_allocate_cpumask();
252	bool ret = false; /* fall back to nocpus */
253	int cpu;
254
255	BUG_ON(!cpumask);
256	if (!numa_node_to_cpus(node, cpumask)) {
257		for (cpu = 0; cpu < (int)cpumask->size; cpu++) {
258			if (numa_bitmask_isbitset(cpumask, cpu)) {
259				ret = true;
260				break;
261			}
262		}
263	}
264	numa_free_cpumask(cpumask);
265
266	return ret;
267}
268
269static cpu_set_t bind_to_cpu(int target_cpu)
270{
271	cpu_set_t orig_mask, mask;
272	int ret;
273
274	ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask);
275	BUG_ON(ret);
276
277	CPU_ZERO(&mask);
278
279	if (target_cpu == -1) {
280		int cpu;
281
282		for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
283			CPU_SET(cpu, &mask);
284	} else {
285		BUG_ON(target_cpu < 0 || target_cpu >= g->p.nr_cpus);
286		CPU_SET(target_cpu, &mask);
287	}
288
289	ret = sched_setaffinity(0, sizeof(mask), &mask);
290	BUG_ON(ret);
291
292	return orig_mask;
293}
294
295static cpu_set_t bind_to_node(int target_node)
296{
297	cpu_set_t orig_mask, mask;
298	int cpu;
299	int ret;
300
301	ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask);
302	BUG_ON(ret);
303
304	CPU_ZERO(&mask);
305
306	if (target_node == NUMA_NO_NODE) {
307		for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
308			CPU_SET(cpu, &mask);
309	} else {
310		struct bitmask *cpumask = numa_allocate_cpumask();
311
312		BUG_ON(!cpumask);
313		if (!numa_node_to_cpus(target_node, cpumask)) {
314			for (cpu = 0; cpu < (int)cpumask->size; cpu++) {
315				if (numa_bitmask_isbitset(cpumask, cpu))
316					CPU_SET(cpu, &mask);
317			}
318		}
319		numa_free_cpumask(cpumask);
320	}
321
322	ret = sched_setaffinity(0, sizeof(mask), &mask);
323	BUG_ON(ret);
324
325	return orig_mask;
326}
327
328static void bind_to_cpumask(cpu_set_t mask)
329{
330	int ret;
331
332	ret = sched_setaffinity(0, sizeof(mask), &mask);
333	BUG_ON(ret);
334}
335
336static void mempol_restore(void)
337{
338	int ret;
339
340	ret = set_mempolicy(MPOL_DEFAULT, NULL, g->p.nr_nodes-1);
341
342	BUG_ON(ret);
343}
344
345static void bind_to_memnode(int node)
346{
347	unsigned long nodemask;
348	int ret;
349
350	if (node == NUMA_NO_NODE)
351		return;
352
353	BUG_ON(g->p.nr_nodes > (int)sizeof(nodemask)*8);
354	nodemask = 1L << node;
355
356	ret = set_mempolicy(MPOL_BIND, &nodemask, sizeof(nodemask)*8);
357	dprintf("binding to node %d, mask: %016lx => %d\n", node, nodemask, ret);
358
359	BUG_ON(ret);
360}
361
362#define HPSIZE (2*1024*1024)
363
364#define set_taskname(fmt...)				\
365do {							\
366	char name[20];					\
367							\
368	snprintf(name, 20, fmt);			\
369	prctl(PR_SET_NAME, name);			\
370} while (0)
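
/*
 * Illustrative example: worker_process(2) names itself "process 2" via
 * prctl(PR_SET_NAME), and its third thread becomes "thread 2/2", visible
 * in /proc/<pid>/comm.
 */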
371
372static u8 *alloc_data(ssize_t bytes0, int map_flags,
373		      int init_zero, int init_cpu0, int thp, int init_random)
374{
375	cpu_set_t orig_mask;
376	ssize_t bytes;
377	u8 *buf;
378	int ret;
379
380	if (!bytes0)
381		return NULL;
382
383	/* Allocate and initialize all memory on CPU#0: */
384	if (init_cpu0) {
385		int node = numa_node_of_cpu(0);
386
387		orig_mask = bind_to_node(node);
388		bind_to_memnode(node);
389	}
390
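	/*
	 * Note: the extra HPSIZE bytes give enough slack for the 2MB
	 * alignment done below while keeping bytes0 usable bytes.
	 */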
391	bytes = bytes0 + HPSIZE;
392
393	buf = (void *)mmap(0, bytes, PROT_READ|PROT_WRITE, MAP_ANON|map_flags, -1, 0);
394	BUG_ON(buf == (void *)-1);
395
396	if (map_flags == MAP_PRIVATE) {
397		if (thp > 0) {
398			ret = madvise(buf, bytes, MADV_HUGEPAGE);
399			if (ret && !g->print_once) {
400				g->print_once = 1;
401				printf("WARNING: Could not enable THP - do: 'echo madvise > /sys/kernel/mm/transparent_hugepage/enabled'\n");
402			}
403		}
404		if (thp < 0) {
405			ret = madvise(buf, bytes, MADV_NOHUGEPAGE);
406			if (ret && !g->print_once) {
407				g->print_once = 1;
408				printf("WARNING: Could not disable THP: run a CONFIG_TRANSPARENT_HUGEPAGE kernel?\n");
409			}
410		}
411	}
412
413	if (init_zero) {
414		bzero(buf, bytes);
415	} else {
416		/* Initialize random contents, different in each word: */
417		if (init_random) {
418			u64 *wbuf = (void *)buf;
419			long off = rand();
420			long i;
421
422			for (i = 0; i < bytes/8; i++)
423				wbuf[i] = i + off;
424		}
425	}
426
427	/* Align to 2MB boundary: */
428	buf = (void *)(((unsigned long)buf + HPSIZE-1) & ~(HPSIZE-1));
429
430	/* Restore affinity: */
431	if (init_cpu0) {
432		bind_to_cpumask(orig_mask);
433		mempol_restore();
434	}
435
436	return buf;
437}
438
439static void free_data(void *data, ssize_t bytes)
440{
441	int ret;
442
443	if (!data)
444		return;
445
446	ret = munmap(data, bytes);
447	BUG_ON(ret);
448}
449
450/*
451 * Create a shared memory buffer that can be shared between processes, zeroed:
452 */
453static void * zalloc_shared_data(ssize_t bytes)
454{
455	return alloc_data(bytes, MAP_SHARED, 1, g->p.init_cpu0,  g->p.thp, g->p.init_random);
456}
457
458/*
459 * Create a shared memory buffer that can be shared between processes:
460 */
461static void * setup_shared_data(ssize_t bytes)
462{
463	return alloc_data(bytes, MAP_SHARED, 0, g->p.init_cpu0,  g->p.thp, g->p.init_random);
464}
465
466/*
467 * Allocate process-local memory - this will either be shared between
468 * threads of this process, or only be accessed by this thread:
469 */
470static void * setup_private_data(ssize_t bytes)
471{
472	return alloc_data(bytes, MAP_PRIVATE, 0, g->p.init_cpu0,  g->p.thp, g->p.init_random);
473}
474
475/*
476 * Initialize a process-shared (global) mutex:
477 */
478static void init_global_mutex(pthread_mutex_t *mutex)
479{
480	pthread_mutexattr_t attr;
481
482	pthread_mutexattr_init(&attr);
483	pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
484	pthread_mutex_init(mutex, &attr);
485}
486
487/*
488 * Initialize a process-shared (global) condition variable:
489 */
490static void init_global_cond(pthread_cond_t *cond)
491{
492	pthread_condattr_t attr;
493
494	pthread_condattr_init(&attr);
495	pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
496	pthread_cond_init(cond, &attr);
497}
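
/*
 * Note: these PTHREAD_PROCESS_SHARED objects live inside the MAP_SHARED
 * global area allocated in init(), which is what allows the fork()ed
 * worker processes to synchronize on them.
 */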
498
499static int parse_cpu_list(const char *arg)
500{
501	p0.cpu_list_str = strdup(arg);
502
503	dprintf("got CPU list: {%s}\n", p0.cpu_list_str);
504
505	return 0;
506}
507
508static int parse_setup_cpu_list(void)
509{
510	struct thread_data *td;
511	char *str0, *str;
512	int t;
513
514	if (!g->p.cpu_list_str)
515		return 0;
516
517	dprintf("g->p.nr_tasks: %d\n", g->p.nr_tasks);
518
519	str0 = str = strdup(g->p.cpu_list_str);
520	t = 0;
521
522	BUG_ON(!str);
523
524	tprintf("# binding tasks to CPUs:\n");
525	tprintf("#  ");
526
527	while (true) {
528		int bind_cpu, bind_cpu_0, bind_cpu_1;
529		char *tok, *tok_end, *tok_step, *tok_len, *tok_mul;
530		int bind_len;
531		int step;
532		int mul;
533
534		tok = strsep(&str, ",");
535		if (!tok)
536			break;
537
538		tok_end = strstr(tok, "-");
539
540		dprintf("\ntoken: {%s}, end: {%s}\n", tok, tok_end);
541		if (!tok_end) {
542			/* Single CPU specified: */
543			bind_cpu_0 = bind_cpu_1 = atol(tok);
544		} else {
545			/* CPU range specified (for example: "5-11"): */
546			bind_cpu_0 = atol(tok);
547			bind_cpu_1 = atol(tok_end + 1);
548		}
549
550		step = 1;
551		tok_step = strstr(tok, "#");
552		if (tok_step) {
553			step = atol(tok_step + 1);
554			BUG_ON(step <= 0 || step >= g->p.nr_cpus);
555		}
556
557		/*
558		 * Mask length.
559		 * E.g.: "--cpus 8_4-16#4" means: '--cpus 8_4,12_4,16_4',
560		 * where the _4 means the next 4 CPUs are allowed.
561		 */
562		bind_len = 1;
563		tok_len = strstr(tok, "_");
564		if (tok_len) {
565			bind_len = atol(tok_len + 1);
566			BUG_ON(bind_len <= 0 || bind_len > g->p.nr_cpus);
567		}
568
569		/* Multiplier shortcut, "0x8" is a shortcut for: "0,0,0,0,0,0,0,0" */
570		mul = 1;
571		tok_mul = strstr(tok, "x");
572		if (tok_mul) {
573			mul = atol(tok_mul + 1);
574			BUG_ON(mul <= 0);
575		}
576
577		dprintf("CPUs: %d_%d-%d#%dx%d\n", bind_cpu_0, bind_len, bind_cpu_1, step, mul);
578
579		if (bind_cpu_0 >= g->p.nr_cpus || bind_cpu_1 >= g->p.nr_cpus) {
580			printf("\nTest not applicable, system has only %d CPUs.\n", g->p.nr_cpus);
581			return -1;
582		}
583
584		BUG_ON(bind_cpu_0 < 0 || bind_cpu_1 < 0);
585		BUG_ON(bind_cpu_0 > bind_cpu_1);
586
587		for (bind_cpu = bind_cpu_0; bind_cpu <= bind_cpu_1; bind_cpu += step) {
588			int i;
589
590			for (i = 0; i < mul; i++) {
591				int cpu;
592
593				if (t >= g->p.nr_tasks) {
594					printf("\n# NOTE: ignoring bind CPUs starting at CPU#%d\n #", bind_cpu);
595					goto out;
596				}
597				td = g->threads + t;
598
599				if (t)
600					tprintf(",");
601				if (bind_len > 1) {
602					tprintf("%2d/%d", bind_cpu, bind_len);
603				} else {
604					tprintf("%2d", bind_cpu);
605				}
606
607				CPU_ZERO(&td->bind_cpumask);
608				for (cpu = bind_cpu; cpu < bind_cpu+bind_len; cpu++) {
609					BUG_ON(cpu < 0 || cpu >= g->p.nr_cpus);
610					CPU_SET(cpu, &td->bind_cpumask);
611				}
612				t++;
613			}
614		}
615	}
616out:
617
618	tprintf("\n");
619
620	if (t < g->p.nr_tasks)
621		printf("# NOTE: %d tasks bound, %d tasks unbound\n", t, g->p.nr_tasks - t);
622
623	free(str0);
624	return 0;
625}
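
/*
 * Worked example (illustrative) of the -C syntax parsed above:
 *   "--cpus 0-8#2x2" binds successive tasks to CPUs 0,0,2,2,4,4,6,6,8,8;
 *   "--cpus 8_4"     allows a task to run on CPUs 8-11 (mask length 4).
 */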
626
627static int parse_cpus_opt(const struct option *opt __maybe_unused,
628			  const char *arg, int unset __maybe_unused)
629{
630	if (!arg)
631		return -1;
632
633	return parse_cpu_list(arg);
634}
635
636static int parse_node_list(const char *arg)
637{
638	p0.node_list_str = strdup(arg);
639
640	dprintf("got NODE list: {%s}\n", p0.node_list_str);
641
642	return 0;
643}
644
645static int parse_setup_node_list(void)
646{
647	struct thread_data *td;
648	char *str0, *str;
649	int t;
650
651	if (!g->p.node_list_str)
652		return 0;
653
654	dprintf("g->p.nr_tasks: %d\n", g->p.nr_tasks);
655
656	str0 = str = strdup(g->p.node_list_str);
657	t = 0;
658
659	BUG_ON(!str);
660
661	tprintf("# binding tasks to NODEs:\n");
662	tprintf("# ");
663
664	while (true) {
665		int bind_node, bind_node_0, bind_node_1;
666		char *tok, *tok_end, *tok_step, *tok_mul;
667		int step;
668		int mul;
669
670		tok = strsep(&str, ",");
671		if (!tok)
672			break;
673
674		tok_end = strstr(tok, "-");
675
676		dprintf("\ntoken: {%s}, end: {%s}\n", tok, tok_end);
677		if (!tok_end) {
678			/* Single NODE specified: */
679			bind_node_0 = bind_node_1 = atol(tok);
680		} else {
681			/* NODE range specified (for example: "5-11"): */
682			bind_node_0 = atol(tok);
683			bind_node_1 = atol(tok_end + 1);
684		}
685
686		step = 1;
687		tok_step = strstr(tok, "#");
688		if (tok_step) {
689			step = atol(tok_step + 1);
690			BUG_ON(step <= 0 || step >= g->p.nr_nodes);
691		}
692
693		/* Multiplier shortcut, "0x8" is a shortcut for: "0,0,0,0,0,0,0,0" */
694		mul = 1;
695		tok_mul = strstr(tok, "x");
696		if (tok_mul) {
697			mul = atol(tok_mul + 1);
698			BUG_ON(mul <= 0);
699		}
700
701		dprintf("NODEs: %d-%d #%d\n", bind_node_0, bind_node_1, step);
702
703		if (bind_node_0 >= g->p.nr_nodes || bind_node_1 >= g->p.nr_nodes) {
704			printf("\nTest not applicable, system has only %d nodes.\n", g->p.nr_nodes);
705			return -1;
706		}
707
708		BUG_ON(bind_node_0 < 0 || bind_node_1 < 0);
709		BUG_ON(bind_node_0 > bind_node_1);
710
711		for (bind_node = bind_node_0; bind_node <= bind_node_1; bind_node += step) {
712			int i;
713
714			for (i = 0; i < mul; i++) {
715				if (t >= g->p.nr_tasks || !node_has_cpus(bind_node)) {
716					printf("\n# NOTE: ignoring bind NODEs starting at NODE#%d\n", bind_node);
717					goto out;
718				}
719				td = g->threads + t;
720
721				if (!t)
722					tprintf(" %2d", bind_node);
723				else
724					tprintf(",%2d", bind_node);
725
726				td->bind_node = bind_node;
727				t++;
728			}
729		}
730	}
731out:
732
733	tprintf("\n");
734
735	if (t < g->p.nr_tasks)
736		printf("# NOTE: %d tasks mem-bound, %d tasks unbound\n", t, g->p.nr_tasks - t);
737
738	free(str0);
739	return 0;
740}
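
/*
 * Worked example (illustrative) of the -M syntax parsed above:
 *   "--memnodes 0x2" mem-binds the first two tasks to node 0;
 *   "--memnodes 1,0" mem-binds task 0 to node 1 and task 1 to node 0.
 */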
741
742static int parse_nodes_opt(const struct option *opt __maybe_unused,
743			  const char *arg, int unset __maybe_unused)
744{
745	if (!arg)
746		return -1;
747
748	return parse_node_list(arg);
749}
750
751#define BIT(x) (1ul << x)
752
753static inline uint32_t lfsr_32(uint32_t lfsr)
754{
755	const uint32_t taps = BIT(1) | BIT(5) | BIT(6) | BIT(31);
756	return (lfsr>>1) ^ ((0x0u - (lfsr & 0x1u)) & taps);
757}
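
/*
 * Illustrative step of the Galois LFSR above: taps == 0x80000062, so
 * lfsr_32(1) == 0x80000062 and lfsr_32(0x80000062) == 0x40000031.
 */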
758
759/*
760 * Make sure there's real data dependency to RAM (when read
761 * accesses are enabled), so the compiler, the CPU and the
762 * kernel (KSM, zero page, etc.) cannot optimize away RAM
763 * accesses:
764 */
765static inline u64 access_data(u64 *data, u64 val)
766{
767	if (g->p.data_reads)
768		val += *data;
769	if (g->p.data_writes)
770		*data = val + 1;
771	return val;
772}
773
774/*
775 * The worker process does two types of work: a forwards-going
776 * loop and a backwards-going loop.
777 *
778 * We do this so that on multiprocessor systems we do not create
779 * a 'train' of processing, with highly synchronized processes,
780 * skewing the whole benchmark.
781 */
782static u64 do_work(u8 *__data, long bytes, int nr, int nr_max, int loop, u64 val)
783{
784	long words = bytes/sizeof(u64);
785	u64 *data = (void *)__data;
786	long chunk_0, chunk_1;
787	u64 *d0, *d, *d1;
788	long off;
789	long i;
790
791	BUG_ON(!data && words);
792	BUG_ON(data && !words);
793
794	if (!data)
795		return val;
796
797	/* Very simple memset() work variant: */
798	if (g->p.data_zero_memset && !g->p.data_rand_walk) {
799		bzero(data, bytes);
800		return val;
801	}
802
803	/* Spread out by PID/TID nr and by loop nr: */
804	chunk_0 = words/nr_max;
805	chunk_1 = words/g->p.nr_loops;
806	off = nr*chunk_0 + loop*chunk_1;
807
808	while (off >= words)
809		off -= words;
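
	/*
	 * Example (illustrative): with nr_max == 4, task nr starts roughly
	 * nr/4 of the way into the buffer, and each loop iteration shifts
	 * that start by a further words/nr_loops, wrapped modulo the buffer.
	 */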
810
811	if (g->p.data_rand_walk) {
812		u32 lfsr = nr + loop + val;
813		int j;
814
815		for (i = 0; i < words/1024; i++) {
816			long start, end;
817
818			lfsr = lfsr_32(lfsr);
819
820			start = lfsr % words;
821			end = min(start + 1024, words-1);
822
823			if (g->p.data_zero_memset) {
824				bzero(data + start, (end-start) * sizeof(u64));
825			} else {
826				for (j = start; j < end; j++)
827					val = access_data(data + j, val);
828			}
829		}
830	} else if (!g->p.data_backwards || (nr + loop) & 1) {
831		/* Process data forwards: */
832
833		d0 = data + off;
834		d  = data + off + 1;
835		d1 = data + words;
836
837		for (;;) {
838			if (unlikely(d >= d1))
839				d = data;
840			if (unlikely(d == d0))
841				break;
842
843			val = access_data(d, val);
844
845			d++;
846		}
847	} else {
848		/* Process data backwards: */
849
850		d0 = data + off;
851		d  = data + off - 1;
852		d1 = data + words;
853
854		for (;;) {
855			if (unlikely(d < data))
856				d = data + words-1;
857			if (unlikely(d == d0))
858				break;
859
860			val = access_data(d, val);
861
862			d--;
863		}
864	}
865
866	return val;
867}
868
869static void update_curr_cpu(int task_nr, unsigned long bytes_worked)
870{
871	unsigned int cpu;
872
873	cpu = sched_getcpu();
874
875	g->threads[task_nr].curr_cpu = cpu;
876	prctl(0, bytes_worked);
877}
878
879#define MAX_NR_NODES	64
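
/*
 * Note (assumption): 64 appears to mirror the single unsigned long
 * nodemask used by bind_to_memnode() on 64-bit builds; systems with more
 * nodes trip the BUG_ON() in init().
 */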
880
881/*
882 * Count the number of nodes a process's threads
883 * are spread out on.
884 *
885 * A count of 1 means that the process is compressed
886 * to a single node. A count of g->p.nr_nodes means it's
887 * spread out on the whole system.
888 */
889static int count_process_nodes(int process_nr)
890{
891	char node_present[MAX_NR_NODES] = { 0, };
892	int nodes;
893	int n, t;
894
895	for (t = 0; t < g->p.nr_threads; t++) {
896		struct thread_data *td;
897		int task_nr;
898		int node;
899
900		task_nr = process_nr*g->p.nr_threads + t;
901		td = g->threads + task_nr;
902
903		node = numa_node_of_cpu(td->curr_cpu);
904		if (node < 0) /* curr_cpu was likely still -1 */
905			return 0;
906
907		node_present[node] = 1;
908	}
909
910	nodes = 0;
911
912	for (n = 0; n < MAX_NR_NODES; n++)
913		nodes += node_present[n];
914
915	return nodes;
916}
917
918/*
919 * Count the number of distinct processes that have threads on this node.
920 *
921 * A count of 1 means that the node contains only a single
922 * process. If all nodes on the system contain at most one
923 * process then we are well-converged.
924 */
925static int count_node_processes(int node)
926{
927	int processes = 0;
928	int t, p;
929
930	for (p = 0; p < g->p.nr_proc; p++) {
931		for (t = 0; t < g->p.nr_threads; t++) {
932			struct thread_data *td;
933			int task_nr;
934			int n;
935
936			task_nr = p*g->p.nr_threads + t;
937			td = g->threads + task_nr;
938
939			n = numa_node_of_cpu(td->curr_cpu);
940			if (n == node) {
941				processes++;
942				break;
943			}
944		}
945	}
946
947	return processes;
948}
949
950static void calc_convergence_compression(int *strong)
951{
952	unsigned int nodes_min, nodes_max;
953	int p;
954
955	nodes_min = -1;
956	nodes_max =  0;
957
958	for (p = 0; p < g->p.nr_proc; p++) {
959		unsigned int nodes = count_process_nodes(p);
960
961		if (!nodes) {
962			*strong = 0;
963			return;
964		}
965
966		nodes_min = min(nodes, nodes_min);
967		nodes_max = max(nodes, nodes_max);
968	}
969
970	/* Strong convergence: all threads compress on a single node: */
971	if (nodes_min == 1 && nodes_max == 1) {
972		*strong = 1;
973	} else {
974		*strong = 0;
975		tprintf(" {%d-%d}", nodes_min, nodes_max);
976	}
977}
978
979static void calc_convergence(double runtime_ns_max, double *convergence)
980{
981	unsigned int loops_done_min, loops_done_max;
982	int process_groups;
983	int nodes[MAX_NR_NODES];
984	int distance;
985	int nr_min;
986	int nr_max;
987	int strong;
988	int sum;
989	int nr;
990	int node;
991	int cpu;
992	int t;
993
994	if (!g->p.show_convergence && !g->p.measure_convergence)
995		return;
996
997	for (node = 0; node < g->p.nr_nodes; node++)
998		nodes[node] = 0;
999
1000	loops_done_min = -1;
1001	loops_done_max = 0;
1002
1003	for (t = 0; t < g->p.nr_tasks; t++) {
1004		struct thread_data *td = g->threads + t;
1005		unsigned int loops_done;
1006
1007		cpu = td->curr_cpu;
1008
1009		/* Not all threads have written it yet: */
1010		if (cpu < 0)
1011			continue;
1012
1013		node = numa_node_of_cpu(cpu);
1014
1015		nodes[node]++;
1016
1017		loops_done = td->loops_done;
1018		loops_done_min = min(loops_done, loops_done_min);
1019		loops_done_max = max(loops_done, loops_done_max);
1020	}
1021
1022	nr_max = 0;
1023	nr_min = g->p.nr_tasks;
1024	sum = 0;
1025
1026	for (node = 0; node < g->p.nr_nodes; node++) {
1027		if (!is_node_present(node))
1028			continue;
1029		nr = nodes[node];
1030		nr_min = min(nr, nr_min);
1031		nr_max = max(nr, nr_max);
1032		sum += nr;
1033	}
1034	BUG_ON(nr_min > nr_max);
1035
1036	BUG_ON(sum > g->p.nr_tasks);
1037
1038	if (0 && (sum < g->p.nr_tasks))
1039		return;
1040
1041	/*
1042	 * Count the number of distinct process groups present
1043	 * on nodes - when we are converged this will decrease
1044	 * to g->p.nr_proc:
1045	 */
1046	process_groups = 0;
1047
1048	for (node = 0; node < g->p.nr_nodes; node++) {
1049		int processes;
1050
1051		if (!is_node_present(node))
1052			continue;
1053		processes = count_node_processes(node);
1054		nr = nodes[node];
1055		tprintf(" %2d/%-2d", nr, processes);
1056
1057		process_groups += processes;
1058	}
1059
1060	distance = nr_max - nr_min;
1061
1062	tprintf(" [%2d/%-2d]", distance, process_groups);
1063
1064	tprintf(" l:%3d-%-3d (%3d)",
1065		loops_done_min, loops_done_max, loops_done_max-loops_done_min);
1066
1067	if (loops_done_min && loops_done_max) {
1068		double skew = 1.0 - (double)loops_done_min/loops_done_max;
1069
1070		tprintf(" [%4.1f%%]", skew * 100.0);
1071	}
1072
1073	calc_convergence_compression(&strong);
1074
1075	if (strong && process_groups == g->p.nr_proc) {
1076		if (!*convergence) {
1077			*convergence = runtime_ns_max;
1078			tprintf(" (%6.1fs converged)\n", *convergence / NSEC_PER_SEC);
1079			if (g->p.measure_convergence) {
1080				g->all_converged = true;
1081				g->stop_work = true;
1082			}
1083		}
1084	} else {
1085		if (*convergence) {
1086			tprintf(" (%6.1fs de-converged)", runtime_ns_max / NSEC_PER_SEC);
1087			*convergence = 0;
1088		}
1089		tprintf("\n");
1090	}
1091}
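
/*
 * Reading the convergence line printed above (illustrative): one
 * "nr/processes" pair per present node (threads on that node / distinct
 * processes on it), then "[distance/process_groups]" and
 * "l:min-max (spread)" for the per-task loop counts.
 */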
1092
1093static void show_summary(double runtime_ns_max, int l, double *convergence)
1094{
1095	tprintf("\r #  %5.1f%%  [%.1f mins]",
1096		(double)(l+1)/g->p.nr_loops*100.0, runtime_ns_max / NSEC_PER_SEC / 60.0);
1097
1098	calc_convergence(runtime_ns_max, convergence);
1099
1100	if (g->p.show_details >= 0)
1101		fflush(stdout);
1102}
1103
1104static void *worker_thread(void *__tdata)
1105{
1106	struct thread_data *td = __tdata;
1107	struct timeval start0, start, stop, diff;
1108	int process_nr = td->process_nr;
1109	int thread_nr = td->thread_nr;
1110	unsigned long last_perturbance;
1111	int task_nr = td->task_nr;
1112	int details = g->p.show_details;
1113	int first_task, last_task;
1114	double convergence = 0;
1115	u64 val = td->val;
1116	double runtime_ns_max;
1117	u8 *global_data;
1118	u8 *process_data;
1119	u8 *thread_data;
1120	u64 bytes_done, secs;
1121	long work_done;
1122	u32 l;
1123	struct rusage rusage;
1124
1125	bind_to_cpumask(td->bind_cpumask);
1126	bind_to_memnode(td->bind_node);
1127
1128	set_taskname("thread %d/%d", process_nr, thread_nr);
1129
1130	global_data = g->data;
1131	process_data = td->process_data;
1132	thread_data = setup_private_data(g->p.bytes_thread);
1133
1134	bytes_done = 0;
1135
1136	last_task = 0;
1137	if (process_nr == g->p.nr_proc-1 && thread_nr == g->p.nr_threads-1)
1138		last_task = 1;
1139
1140	first_task = 0;
1141	if (process_nr == 0 && thread_nr == 0)
1142		first_task = 1;
1143
1144	if (details >= 2) {
1145		printf("#  thread %2d / %2d global mem: %p, process mem: %p, thread mem: %p\n",
1146			process_nr, thread_nr, global_data, process_data, thread_data);
1147	}
1148
1149	if (g->p.serialize_startup) {
1150		pthread_mutex_lock(&g->startup_mutex);
1151		g->nr_tasks_started++;
1152		/* The last thread wakes the main process. */
1153		if (g->nr_tasks_started == g->p.nr_tasks)
1154			pthread_cond_signal(&g->startup_cond);
1155
1156		pthread_mutex_unlock(&g->startup_mutex);
1157
1158		/* Here we will wait for the main process to start us all at once: */
1159		pthread_mutex_lock(&g->start_work_mutex);
1160		g->start_work = false;
1161		g->nr_tasks_working++;
1162		while (!g->start_work)
1163			pthread_cond_wait(&g->start_work_cond, &g->start_work_mutex);
1164
1165		pthread_mutex_unlock(&g->start_work_mutex);
1166	}
1167
1168	gettimeofday(&start0, NULL);
1169
1170	start = stop = start0;
1171	last_perturbance = start.tv_sec;
1172
1173	for (l = 0; l < g->p.nr_loops; l++) {
1174		start = stop;
1175
1176		if (g->stop_work)
1177			break;
1178
1179		val += do_work(global_data,  g->p.bytes_global,  process_nr, g->p.nr_proc,	l, val);
1180		val += do_work(process_data, g->p.bytes_process, thread_nr,  g->p.nr_threads,	l, val);
1181		val += do_work(thread_data,  g->p.bytes_thread,  0,          1,		l, val);
1182
1183		if (g->p.sleep_usecs) {
1184			pthread_mutex_lock(td->process_lock);
1185			usleep(g->p.sleep_usecs);
1186			pthread_mutex_unlock(td->process_lock);
1187		}
1188		/*
1189		 * Amount of work to be done under a process-global lock:
1190		 */
1191		if (g->p.bytes_process_locked) {
1192			pthread_mutex_lock(td->process_lock);
1193			val += do_work(process_data, g->p.bytes_process_locked, thread_nr,  g->p.nr_threads,	l, val);
1194			pthread_mutex_unlock(td->process_lock);
1195		}
1196
1197		work_done = g->p.bytes_global + g->p.bytes_process +
1198			    g->p.bytes_process_locked + g->p.bytes_thread;
1199
1200		update_curr_cpu(task_nr, work_done);
1201		bytes_done += work_done;
1202
1203		if (details < 0 && !g->p.perturb_secs && !g->p.measure_convergence && !g->p.nr_secs)
1204			continue;
1205
1206		td->loops_done = l;
1207
1208		gettimeofday(&stop, NULL);
1209
1210		/* Check whether our max runtime timed out: */
1211		if (g->p.nr_secs) {
1212			timersub(&stop, &start0, &diff);
1213			if ((u32)diff.tv_sec >= g->p.nr_secs) {
1214				g->stop_work = true;
1215				break;
1216			}
1217		}
1218
1219		/* Update the summary at most once per second: */
1220		if (start.tv_sec == stop.tv_sec)
1221			continue;
1222
1223		/*
1224		 * Perturb the first task's equilibrium every g->p.perturb_secs seconds,
1225		 * by briefly migrating it to the other half of the system:
1226		 */
1227		if (first_task && g->p.perturb_secs && (int)(stop.tv_sec - last_perturbance) >= g->p.perturb_secs) {
1228			cpu_set_t orig_mask;
1229			int target_cpu;
1230			int this_cpu;
1231
1232			last_perturbance = stop.tv_sec;
1233
1234			/*
1235			 * Depending on where we are running, move into
1236			 * the other half of the system, to create some
1237			 * real disturbance:
1238			 */
1239			this_cpu = g->threads[task_nr].curr_cpu;
1240			if (this_cpu < g->p.nr_cpus/2)
1241				target_cpu = g->p.nr_cpus-1;
1242			else
1243				target_cpu = 0;
1244
1245			orig_mask = bind_to_cpu(target_cpu);
1246
1247			/* Here we are running on the target CPU already */
1248			if (details >= 1)
1249				printf(" (injecting perturbance, moved to CPU#%d)\n", target_cpu);
1250
1251			bind_to_cpumask(orig_mask);
1252		}
1253
1254		if (details >= 3) {
1255			timersub(&stop, &start, &diff);
1256			runtime_ns_max = diff.tv_sec * NSEC_PER_SEC;
1257			runtime_ns_max += diff.tv_usec * NSEC_PER_USEC;
1258
1259			if (details >= 0) {
1260				printf(" #%2d / %2d: %14.2lf nsecs/op [val: %016"PRIx64"]\n",
1261					process_nr, thread_nr, runtime_ns_max / bytes_done, val);
1262			}
1263			fflush(stdout);
1264		}
1265		if (!last_task)
1266			continue;
1267
1268		timersub(&stop, &start0, &diff);
1269		runtime_ns_max = diff.tv_sec * NSEC_PER_SEC;
1270		runtime_ns_max += diff.tv_usec * NSEC_PER_USEC;
1271
1272		show_summary(runtime_ns_max, l, &convergence);
1273	}
1274
1275	gettimeofday(&stop, NULL);
1276	timersub(&stop, &start0, &diff);
1277	td->runtime_ns = diff.tv_sec * NSEC_PER_SEC;
1278	td->runtime_ns += diff.tv_usec * NSEC_PER_USEC;
1279	secs = td->runtime_ns / NSEC_PER_SEC;
1280	td->speed_gbs = secs ? bytes_done / secs / 1e9 : 0;
1281
1282	getrusage(RUSAGE_THREAD, &rusage);
1283	td->system_time_ns = rusage.ru_stime.tv_sec * NSEC_PER_SEC;
1284	td->system_time_ns += rusage.ru_stime.tv_usec * NSEC_PER_USEC;
1285	td->user_time_ns = rusage.ru_utime.tv_sec * NSEC_PER_SEC;
1286	td->user_time_ns += rusage.ru_utime.tv_usec * NSEC_PER_USEC;
1287
1288	free_data(thread_data, g->p.bytes_thread);
1289
1290	pthread_mutex_lock(&g->stop_work_mutex);
1291	g->bytes_done += bytes_done;
1292	pthread_mutex_unlock(&g->stop_work_mutex);
1293
1294	return NULL;
1295}
1296
1297/*
1298 * A worker process starts a couple of threads:
1299 */
1300static void worker_process(int process_nr)
1301{
1302	pthread_mutex_t process_lock;
1303	struct thread_data *td;
1304	pthread_t *pthreads;
1305	u8 *process_data;
1306	int task_nr;
1307	int ret;
1308	int t;
1309
1310	pthread_mutex_init(&process_lock, NULL);
1311	set_taskname("process %d", process_nr);
1312
1313	/*
1314	 * Pick up the memory policy and the CPU binding of our first thread,
1315	 * so that we initialize memory accordingly:
1316	 */
1317	task_nr = process_nr*g->p.nr_threads;
1318	td = g->threads + task_nr;
1319
1320	bind_to_memnode(td->bind_node);
1321	bind_to_cpumask(td->bind_cpumask);
1322
1323	pthreads = zalloc(g->p.nr_threads * sizeof(pthread_t));
1324	process_data = setup_private_data(g->p.bytes_process);
1325
1326	if (g->p.show_details >= 3) {
1327		printf(" # process %2d global mem: %p, process mem: %p\n",
1328			process_nr, g->data, process_data);
1329	}
1330
1331	for (t = 0; t < g->p.nr_threads; t++) {
1332		task_nr = process_nr*g->p.nr_threads + t;
1333		td = g->threads + task_nr;
1334
1335		td->process_data = process_data;
1336		td->process_nr   = process_nr;
1337		td->thread_nr    = t;
1338		td->task_nr	 = task_nr;
1339		td->val          = rand();
1340		td->curr_cpu	 = -1;
1341		td->process_lock = &process_lock;
1342
1343		ret = pthread_create(pthreads + t, NULL, worker_thread, td);
1344		BUG_ON(ret);
1345	}
1346
1347	for (t = 0; t < g->p.nr_threads; t++) {
1348		ret = pthread_join(pthreads[t], NULL);
1349		BUG_ON(ret);
1350	}
1351
1352	free_data(process_data, g->p.bytes_process);
1353	free(pthreads);
1354}
1355
1356static void print_summary(void)
1357{
1358	if (g->p.show_details < 0)
1359		return;
1360
1361	printf("\n ###\n");
1362	printf(" # %d %s will execute (on %d nodes, %d CPUs):\n",
1363		g->p.nr_tasks, g->p.nr_tasks == 1 ? "task" : "tasks", nr_numa_nodes(), g->p.nr_cpus);
1364	printf(" #      %5dx %5ldMB global  shared mem operations\n",
1365			g->p.nr_loops, g->p.bytes_global/1024/1024);
1366	printf(" #      %5dx %5ldMB process shared mem operations\n",
1367			g->p.nr_loops, g->p.bytes_process/1024/1024);
1368	printf(" #      %5dx %5ldMB thread  local  mem operations\n",
1369			g->p.nr_loops, g->p.bytes_thread/1024/1024);
1370
1371	printf(" ###\n");
1372
1373	printf("\n ###\n"); fflush(stdout);
1374}
1375
1376static void init_thread_data(void)
1377{
1378	ssize_t size = sizeof(*g->threads)*g->p.nr_tasks;
1379	int t;
1380
1381	g->threads = zalloc_shared_data(size);
1382
1383	for (t = 0; t < g->p.nr_tasks; t++) {
1384		struct thread_data *td = g->threads + t;
1385		int cpu;
1386
1387		/* Allow all nodes by default: */
1388		td->bind_node = NUMA_NO_NODE;
1389
1390		/* Allow all CPUs by default: */
1391		CPU_ZERO(&td->bind_cpumask);
1392		for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
1393			CPU_SET(cpu, &td->bind_cpumask);
1394	}
1395}
1396
1397static void deinit_thread_data(void)
1398{
1399	ssize_t size = sizeof(*g->threads)*g->p.nr_tasks;
1400
1401	free_data(g->threads, size);
1402}
1403
1404static int init(void)
1405{
1406	g = (void *)alloc_data(sizeof(*g), MAP_SHARED, 1, 0, 0 /* THP */, 0);
1407
1408	/* Copy over options: */
1409	g->p = p0;
1410
1411	g->p.nr_cpus = numa_num_configured_cpus();
1412
1413	g->p.nr_nodes = numa_max_node() + 1;
1414
1415	/* nr_nodes is bounded by the node_present[] char array in count_process_nodes(): */
1416	BUG_ON(g->p.nr_nodes > MAX_NR_NODES || g->p.nr_nodes < 0);
1417
1418	if (g->p.show_quiet && !g->p.show_details)
1419		g->p.show_details = -1;
1420
1421	/* Some memory should be specified: */
1422	if (!g->p.mb_global_str && !g->p.mb_proc_str && !g->p.mb_thread_str)
1423		return -1;
1424
1425	if (g->p.mb_global_str) {
1426		g->p.mb_global = atof(g->p.mb_global_str);
1427		BUG_ON(g->p.mb_global < 0);
1428	}
1429
1430	if (g->p.mb_proc_str) {
1431		g->p.mb_proc = atof(g->p.mb_proc_str);
1432		BUG_ON(g->p.mb_proc < 0);
1433	}
1434
1435	if (g->p.mb_proc_locked_str) {
1436		g->p.mb_proc_locked = atof(g->p.mb_proc_locked_str);
1437		BUG_ON(g->p.mb_proc_locked < 0);
1438		BUG_ON(g->p.mb_proc_locked > g->p.mb_proc);
1439	}
1440
1441	if (g->p.mb_thread_str) {
1442		g->p.mb_thread = atof(g->p.mb_thread_str);
1443		BUG_ON(g->p.mb_thread < 0);
1444	}
1445
1446	BUG_ON(g->p.nr_threads <= 0);
1447	BUG_ON(g->p.nr_proc <= 0);
1448
1449	g->p.nr_tasks = g->p.nr_proc*g->p.nr_threads;
1450
1451	g->p.bytes_global		= g->p.mb_global	*1024L*1024L;
1452	g->p.bytes_process		= g->p.mb_proc		*1024L*1024L;
1453	g->p.bytes_process_locked	= g->p.mb_proc_locked	*1024L*1024L;
1454	g->p.bytes_thread		= g->p.mb_thread	*1024L*1024L;
1455
1456	g->data = setup_shared_data(g->p.bytes_global);
1457
1458	/* Startup serialization: */
1459	init_global_mutex(&g->start_work_mutex);
1460	init_global_cond(&g->start_work_cond);
1461	init_global_mutex(&g->startup_mutex);
1462	init_global_cond(&g->startup_cond);
1463	init_global_mutex(&g->stop_work_mutex);
1464
1465	init_thread_data();
1466
1467	tprintf("#\n");
1468	if (parse_setup_cpu_list() || parse_setup_node_list())
1469		return -1;
1470	tprintf("#\n");
1471
1472	print_summary();
1473
1474	return 0;
1475}
1476
1477static void deinit(void)
1478{
1479	free_data(g->data, g->p.bytes_global);
1480	g->data = NULL;
1481
1482	deinit_thread_data();
1483
1484	free_data(g, sizeof(*g));
1485	g = NULL;
1486}
1487
1488/*
1489 * Print a short or long result, depending on the verbosity setting:
1490 */
1491static void print_res(const char *name, double val,
1492		      const char *txt_unit, const char *txt_short, const char *txt_long)
1493{
1494	if (!name)
1495		name = "main,";
1496
1497	if (!g->p.show_quiet)
1498		printf(" %-30s %15.3f, %-15s %s\n", name, val, txt_unit, txt_short);
1499	else
1500		printf(" %14.3f %s\n", val, txt_long);
1501}
1502
1503static int __bench_numa(const char *name)
1504{
1505	struct timeval start, stop, diff;
1506	u64 runtime_ns_min, runtime_ns_sum;
1507	pid_t *pids, pid, wpid;
1508	double delta_runtime;
1509	double runtime_avg;
1510	double runtime_sec_max;
1511	double runtime_sec_min;
1512	int wait_stat;
1513	double bytes;
1514	int i, t, p;
1515
1516	if (init())
1517		return -1;
1518
1519	pids = zalloc(g->p.nr_proc * sizeof(*pids));
1520	pid = -1;
1521
1522	if (g->p.serialize_startup) {
1523		tprintf(" #\n");
1524		tprintf(" # Startup synchronization: ..."); fflush(stdout);
1525	}
1526
1527	gettimeofday(&start, NULL);
1528
1529	for (i = 0; i < g->p.nr_proc; i++) {
1530		pid = fork();
1531		dprintf(" # process %2d: PID %d\n", i, pid);
1532
1533		BUG_ON(pid < 0);
1534		if (!pid) {
1535			/* Child process: */
1536			worker_process(i);
1537
1538			exit(0);
1539		}
1540		pids[i] = pid;
1541
1542	}
1543
1544	if (g->p.serialize_startup) {
1545		bool threads_ready = false;
1546		double startup_sec;
1547
1548		/*
1549		 * Wait for all the threads to start up. The last thread will
1550		 * signal this process.
1551		 */
1552		pthread_mutex_lock(&g->startup_mutex);
1553		while (g->nr_tasks_started != g->p.nr_tasks)
1554			pthread_cond_wait(&g->startup_cond, &g->startup_mutex);
1555
1556		pthread_mutex_unlock(&g->startup_mutex);
1557
1558		/* Wait for all threads to be at the start_work_cond. */
1559		while (!threads_ready) {
1560			pthread_mutex_lock(&g->start_work_mutex);
1561			threads_ready = (g->nr_tasks_working == g->p.nr_tasks);
1562			pthread_mutex_unlock(&g->start_work_mutex);
1563			if (!threads_ready)
1564				usleep(1);
1565		}
1566
1567		gettimeofday(&stop, NULL);
1568
1569		timersub(&stop, &start, &diff);
1570
1571		startup_sec = diff.tv_sec * NSEC_PER_SEC;
1572		startup_sec += diff.tv_usec * NSEC_PER_USEC;
1573		startup_sec /= NSEC_PER_SEC;
1574
1575		tprintf(" threads initialized in %.6f seconds.\n", startup_sec);
1576		tprintf(" #\n");
1577
1578		start = stop;
1579		/* Start all threads running. */
1580		pthread_mutex_lock(&g->start_work_mutex);
1581		g->start_work = true;
1582		pthread_mutex_unlock(&g->start_work_mutex);
1583		pthread_cond_broadcast(&g->start_work_cond);
1584	} else {
1585		gettimeofday(&start, NULL);
1586	}
1587
1588	/* Parent process: */
1589
1590
1591	for (i = 0; i < g->p.nr_proc; i++) {
1592		wpid = waitpid(pids[i], &wait_stat, 0);
1593		BUG_ON(wpid < 0);
1594		BUG_ON(!WIFEXITED(wait_stat));
1595
1596	}
1597
1598	runtime_ns_sum = 0;
1599	runtime_ns_min = -1LL;
1600
1601	for (t = 0; t < g->p.nr_tasks; t++) {
1602		u64 thread_runtime_ns = g->threads[t].runtime_ns;
1603
1604		runtime_ns_sum += thread_runtime_ns;
1605		runtime_ns_min = min(thread_runtime_ns, runtime_ns_min);
1606	}
1607
1608	gettimeofday(&stop, NULL);
1609	timersub(&stop, &start, &diff);
1610
1611	BUG_ON(bench_format != BENCH_FORMAT_DEFAULT);
1612
1613	tprintf("\n ###\n");
1614	tprintf("\n");
1615
1616	runtime_sec_max = diff.tv_sec * NSEC_PER_SEC;
1617	runtime_sec_max += diff.tv_usec * NSEC_PER_USEC;
1618	runtime_sec_max /= NSEC_PER_SEC;
1619
1620	runtime_sec_min = runtime_ns_min / NSEC_PER_SEC;
1621
1622	bytes = g->bytes_done;
1623	runtime_avg = (double)runtime_ns_sum / g->p.nr_tasks / NSEC_PER_SEC;
1624
1625	if (g->p.measure_convergence) {
1626		print_res(name, runtime_sec_max,
1627			"secs,", "NUMA-convergence-latency", "secs latency to NUMA-converge");
1628	}
1629
1630	print_res(name, runtime_sec_max,
1631		"secs,", "runtime-max/thread",	"secs slowest (max) thread-runtime");
1632
1633	print_res(name, runtime_sec_min,
1634		"secs,", "runtime-min/thread",	"secs fastest (min) thread-runtime");
1635
1636	print_res(name, runtime_avg,
1637		"secs,", "runtime-avg/thread",	"secs average thread-runtime");
1638
1639	delta_runtime = (runtime_sec_max - runtime_sec_min)/2.0;
1640	print_res(name, delta_runtime / runtime_sec_max * 100.0,
1641		"%,", "spread-runtime/thread",	"% difference between max/avg runtime");
1642
1643	print_res(name, bytes / g->p.nr_tasks / 1e9,
1644		"GB,", "data/thread",		"GB data processed, per thread");
1645
1646	print_res(name, bytes / 1e9,
1647		"GB,", "data-total",		"GB data processed, total");
1648
1649	print_res(name, runtime_sec_max * NSEC_PER_SEC / (bytes / g->p.nr_tasks),
1650		"nsecs,", "runtime/byte/thread","nsecs/byte/thread runtime");
1651
1652	print_res(name, bytes / g->p.nr_tasks / 1e9 / runtime_sec_max,
1653		"GB/sec,", "thread-speed",	"GB/sec/thread speed");
1654
1655	print_res(name, bytes / runtime_sec_max / 1e9,
1656		"GB/sec,", "total-speed",	"GB/sec total speed");
1657
1658	if (g->p.show_details >= 2) {
1659		char tname[14 + 2 * 11 + 1];
1660		struct thread_data *td;
1661		for (p = 0; p < g->p.nr_proc; p++) {
1662			for (t = 0; t < g->p.nr_threads; t++) {
1663				memset(tname, 0, sizeof(tname));
1664				td = g->threads + p*g->p.nr_threads + t;
1665				snprintf(tname, sizeof(tname), "process%d:thread%d", p, t);
1666				print_res(tname, td->speed_gbs,
1667					"GB/sec",	"thread-speed", "GB/sec/thread speed");
1668				print_res(tname, td->system_time_ns / NSEC_PER_SEC,
1669					"secs",	"thread-system-time", "system CPU time/thread");
1670				print_res(tname, td->user_time_ns / NSEC_PER_SEC,
1671					"secs",	"thread-user-time", "user CPU time/thread");
1672			}
1673		}
1674	}
1675
1676	free(pids);
1677
1678	deinit();
1679
1680	return 0;
1681}
1682
1683#define MAX_ARGS 50
1684
1685static int command_size(const char **argv)
1686{
1687	int size = 0;
1688
1689	while (*argv) {
1690		size++;
1691		argv++;
1692	}
1693
1694	BUG_ON(size >= MAX_ARGS);
1695
1696	return size;
1697}
1698
1699static void init_params(struct params *p, const char *name, int argc, const char **argv)
1700{
1701	int i;
1702
1703	printf("\n # Running %s \"perf bench numa", name);
1704
1705	for (i = 0; i < argc; i++)
1706		printf(" %s", argv[i]);
1707
1708	printf("\"\n");
1709
1710	memset(p, 0, sizeof(*p));
1711
1712	/* Initialize nonzero defaults: */
1713
1714	p->serialize_startup		= 1;
1715	p->data_reads			= true;
1716	p->data_writes			= true;
1717	p->data_backwards		= true;
1718	p->data_rand_walk		= true;
1719	p->nr_loops			= -1;
1720	p->init_random			= true;
1721	p->mb_global_str		= "1";
1722	p->nr_proc			= 1;
1723	p->nr_threads			= 1;
1724	p->nr_secs			= 5;
1725	p->run_all			= argc == 1;
1726}
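
/*
 * With these defaults a bare "perf bench numa mem" behaves roughly like
 * "perf bench numa mem -p 1 -t 1 -G 1 -s 5 -R -W -B -r -I -S" (an
 * illustrative summary of the fields set above, not an exact command line).
 */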
1727
1728static int run_bench_numa(const char *name, const char **argv)
1729{
1730	int argc = command_size(argv);
1731
1732	init_params(&p0, name, argc, argv);
1733	argc = parse_options(argc, argv, options, bench_numa_usage, 0);
1734	if (argc)
1735		goto err;
1736
1737	if (__bench_numa(name))
1738		goto err;
1739
1740	return 0;
1741
1742err:
1743	return -1;
1744}
1745
1746#define OPT_BW_RAM		"-s",  "20", "-zZq",    "--thp", " 1", "--no-data_rand_walk"
1747#define OPT_BW_RAM_NOTHP	OPT_BW_RAM,		"--thp", "-1"
1748
1749#define OPT_CONV		"-s", "100", "-zZ0qcm", "--thp", " 1"
1750#define OPT_CONV_NOTHP		OPT_CONV,		"--thp", "-1"
1751
1752#define OPT_BW			"-s",  "20", "-zZ0q",   "--thp", " 1"
1753#define OPT_BW_NOTHP		OPT_BW,			"--thp", "-1"
1754
1755/*
1756 * The built-in test-suite executed by "perf bench numa -a".
1757 *
1758 * (A minimum of 4 nodes and 16 GB of RAM is recommended.)
1759 */
1760static const char *tests[][MAX_ARGS] = {
1761   /* Basic single-stream NUMA bandwidth measurements: */
1762   { "RAM-bw-local,",     "mem",  "-p",  "1",  "-t",  "1", "-P", "1024",
1763			  "-C" ,   "0", "-M",   "0", OPT_BW_RAM },
1764   { "RAM-bw-local-NOTHP,",
1765			  "mem",  "-p",  "1",  "-t",  "1", "-P", "1024",
1766			  "-C" ,   "0", "-M",   "0", OPT_BW_RAM_NOTHP },
1767   { "RAM-bw-remote,",    "mem",  "-p",  "1",  "-t",  "1", "-P", "1024",
1768			  "-C" ,   "0", "-M",   "1", OPT_BW_RAM },
1769
1770   /* 2-stream NUMA bandwidth measurements: */
1771   { "RAM-bw-local-2x,",  "mem",  "-p",  "2",  "-t",  "1", "-P", "1024",
1772			   "-C", "0,2", "-M", "0x2", OPT_BW_RAM },
1773   { "RAM-bw-remote-2x,", "mem",  "-p",  "2",  "-t",  "1", "-P", "1024",
1774		 	   "-C", "0,2", "-M", "1x2", OPT_BW_RAM },
1775
1776   /* Cross-stream NUMA bandwidth measurement: */
1777   { "RAM-bw-cross,",     "mem",  "-p",  "2",  "-t",  "1", "-P", "1024",
1778		 	   "-C", "0,8", "-M", "1,0", OPT_BW_RAM },
1779
1780   /* Convergence latency measurements: */
1781   { " 1x3-convergence,", "mem",  "-p",  "1", "-t",  "3", "-P",  "512", OPT_CONV },
1782   { " 1x4-convergence,", "mem",  "-p",  "1", "-t",  "4", "-P",  "512", OPT_CONV },
1783   { " 1x6-convergence,", "mem",  "-p",  "1", "-t",  "6", "-P", "1020", OPT_CONV },
1784   { " 2x3-convergence,", "mem",  "-p",  "2", "-t",  "3", "-P", "1020", OPT_CONV },
1785   { " 3x3-convergence,", "mem",  "-p",  "3", "-t",  "3", "-P", "1020", OPT_CONV },
1786   { " 4x4-convergence,", "mem",  "-p",  "4", "-t",  "4", "-P",  "512", OPT_CONV },
1787   { " 4x4-convergence-NOTHP,",
1788			  "mem",  "-p",  "4", "-t",  "4", "-P",  "512", OPT_CONV_NOTHP },
1789   { " 4x6-convergence,", "mem",  "-p",  "4", "-t",  "6", "-P", "1020", OPT_CONV },
1790   { " 4x8-convergence,", "mem",  "-p",  "4", "-t",  "8", "-P",  "512", OPT_CONV },
1791   { " 8x4-convergence,", "mem",  "-p",  "8", "-t",  "4", "-P",  "512", OPT_CONV },
1792   { " 8x4-convergence-NOTHP,",
1793			  "mem",  "-p",  "8", "-t",  "4", "-P",  "512", OPT_CONV_NOTHP },
1794   { " 3x1-convergence,", "mem",  "-p",  "3", "-t",  "1", "-P",  "512", OPT_CONV },
1795   { " 4x1-convergence,", "mem",  "-p",  "4", "-t",  "1", "-P",  "512", OPT_CONV },
1796   { " 8x1-convergence,", "mem",  "-p",  "8", "-t",  "1", "-P",  "512", OPT_CONV },
1797   { "16x1-convergence,", "mem",  "-p", "16", "-t",  "1", "-P",  "256", OPT_CONV },
1798   { "32x1-convergence,", "mem",  "-p", "32", "-t",  "1", "-P",  "128", OPT_CONV },
1799
1800   /* Various NUMA process/thread layout bandwidth measurements: */
1801   { " 2x1-bw-process,",  "mem",  "-p",  "2", "-t",  "1", "-P", "1024", OPT_BW },
1802   { " 3x1-bw-process,",  "mem",  "-p",  "3", "-t",  "1", "-P", "1024", OPT_BW },
1803   { " 4x1-bw-process,",  "mem",  "-p",  "4", "-t",  "1", "-P", "1024", OPT_BW },
1804   { " 8x1-bw-process,",  "mem",  "-p",  "8", "-t",  "1", "-P", " 512", OPT_BW },
1805   { " 8x1-bw-process-NOTHP,",
1806			  "mem",  "-p",  "8", "-t",  "1", "-P", " 512", OPT_BW_NOTHP },
1807   { "16x1-bw-process,",  "mem",  "-p", "16", "-t",  "1", "-P",  "256", OPT_BW },
1808
1809   { " 1x4-bw-thread,",   "mem",  "-p",  "1", "-t",  "4", "-T",  "256", OPT_BW },
1810   { " 1x8-bw-thread,",   "mem",  "-p",  "1", "-t",  "8", "-T",  "256", OPT_BW },
1811   { "1x16-bw-thread,",   "mem",  "-p",  "1", "-t", "16", "-T",  "128", OPT_BW },
1812   { "1x32-bw-thread,",   "mem",  "-p",  "1", "-t", "32", "-T",   "64", OPT_BW },
1813
1814   { " 2x3-bw-process,",  "mem",  "-p",  "2", "-t",  "3", "-P",  "512", OPT_BW },
1815   { " 4x4-bw-process,",  "mem",  "-p",  "4", "-t",  "4", "-P",  "512", OPT_BW },
1816   { " 4x6-bw-process,",  "mem",  "-p",  "4", "-t",  "6", "-P",  "512", OPT_BW },
1817   { " 4x8-bw-process,",  "mem",  "-p",  "4", "-t",  "8", "-P",  "512", OPT_BW },
1818   { " 4x8-bw-process-NOTHP,",
1819			  "mem",  "-p",  "4", "-t",  "8", "-P",  "512", OPT_BW_NOTHP },
1820   { " 3x3-bw-process,",  "mem",  "-p",  "3", "-t",  "3", "-P",  "512", OPT_BW },
1821   { " 5x5-bw-process,",  "mem",  "-p",  "5", "-t",  "5", "-P",  "512", OPT_BW },
1822
1823   { "2x16-bw-process,",  "mem",  "-p",  "2", "-t", "16", "-P",  "512", OPT_BW },
1824   { "1x32-bw-process,",  "mem",  "-p",  "1", "-t", "32", "-P", "2048", OPT_BW },
1825
1826   { "numa02-bw,",        "mem",  "-p",  "1", "-t", "32", "-T",   "32", OPT_BW },
1827   { "numa02-bw-NOTHP,",  "mem",  "-p",  "1", "-t", "32", "-T",   "32", OPT_BW_NOTHP },
1828   { "numa01-bw-thread,", "mem",  "-p",  "2", "-t", "16", "-T",  "192", OPT_BW },
1829   { "numa01-bw-thread-NOTHP,",
1830			  "mem",  "-p",  "2", "-t", "16", "-T",  "192", OPT_BW_NOTHP },
1831};
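
/*
 * Illustrative expansion of one row of tests[] above: "RAM-bw-local" runs
 * roughly "perf bench numa mem -p 1 -t 1 -P 1024 -C 0 -M 0 -s 20 -zZq
 * --thp 1 --no-data_rand_walk" (OPT_BW_RAM spelled out).
 */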
1832
1833static int bench_all(void)
1834{
1835	int nr = ARRAY_SIZE(tests);
1836	int ret;
1837	int i;
1838
1839	ret = system("echo ' #'; echo ' # Running test on: '$(uname -a); echo ' #'");
1840	BUG_ON(ret < 0);
1841
1842	for (i = 0; i < nr; i++) {
1843		run_bench_numa(tests[i][0], tests[i] + 1);
1844	}
1845
1846	printf("\n");
1847
1848	return 0;
1849}
1850
1851int bench_numa(int argc, const char **argv)
1852{
1853	init_params(&p0, "main,", argc, argv);
1854	argc = parse_options(argc, argv, options, bench_numa_usage, 0);
1855	if (argc)
1856		goto err;
1857
1858	if (p0.run_all)
1859		return bench_all();
1860
1861	if (__bench_numa(NULL))
1862		goto err;
1863
1864	return 0;
1865
1866err:
1867	usage_with_options(numa_usage, options);
1868	return -1;
1869}
1870