// SPDX-License-Identifier: GPL-2.0
#ifdef HAVE_EVENTFD_SUPPORT
/*
 * Copyright (C) 2018 Davidlohr Bueso.
 *
 * This program benchmarks concurrent epoll_wait(2) monitoring multiple
 * file descriptors under one of two load balancing models. The first,
 * and default, is the single/combined queueing (which refers to a single
 * epoll instance for N worker threads):
 *
 *                          |---> [worker A]
 *                          |---> [worker B]
 *        [combined queue]  .---> [worker C]
 *                          |---> [worker D]
 *                          |---> [worker E]
 *
 * While the second model, enabled via the --multiq option, uses multiple
 * queueing (which refers to one epoll instance per worker). For example,
 * short lived tcp connections in a high throughput httpd server will
 * distribute the accept()'ing connections across CPUs. In this case each
 * worker does a limited amount of processing.
 *
 *             [queue A]  ---> [worker]
 *             [queue B]  ---> [worker]
 *             [queue C]  ---> [worker]
 *             [queue D]  ---> [worker]
 *             [queue E]  ---> [worker]
 *
 * Naturally, the single queue will enforce more concurrency on the epoll
 * instance, and can therefore scale poorly compared to multiple queues.
 * However, this is raw benchmark data and must be taken with a grain of
 * salt when choosing how to make use of sys_epoll.
 *
 * Each thread has a number of private, nonblocking file descriptors,
 * referred to as fdmap. A writer thread will constantly be writing to
 * the fdmaps of all threads, minimizing each thread's chances of
 * epoll_wait not finding any ready read events and blocking, as this
 * is not what we want to stress. The size of the fdmap can be adjusted
 * by the user; enlarging the value will increase the chances of
 * epoll_wait(2) blocking as the linear writer thread will take "longer",
 * at least at a high level.
 *
 * Note that because fds are private to each thread, this workload does
 * not stress scenarios where multiple tasks are awoken per ready IO; ie:
 * EPOLLEXCLUSIVE semantics.
 *
 * The end result/metric is throughput: number of ops/second, where an
 * operation consists of:
 *
 *          epoll_wait(2) + [others]
 *
 *        ... where [others] is the cost of re-adding the fd (EPOLLET),
 *            or rearming it (EPOLLONESHOT).
 *
 *
 * The purpose of this program is to be useful for measuring kernel
 * related changes to sys_epoll, not for comparing different IO polling
 * methods. Hence everything is very ad hoc and outputs raw
 * microbenchmark numbers. Also, this uses eventfd; similar tools tend
 * to use pipes or sockets, but the result is the same.
 */
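
/*
 * Illustrative invocations (all flags are defined in the options table
 * below; the numbers are just examples):
 *
 *   perf bench epoll wait                  # default single/combined queue
 *   perf bench epoll wait --multiq -t 8    # one epoll instance per worker
 *   perf bench epoll wait -E -S -f 128     # edge-triggered + oneshot, 128 fds per thread
 */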

/* For the CLR_() macros */
#include <string.h>
#include <pthread.h>
#include <unistd.h>

#include <errno.h>
#include <inttypes.h>
#include <signal.h>
#include <stdlib.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <sys/types.h>
#include <internal/cpumap.h>
#include <perf/cpumap.h>

#include "../util/stat.h"
#include <subcmd/parse-options.h>
#include "bench.h"

#include <err.h>

#define printinfo(fmt, arg...) \
	do { if (__verbose) { printf(fmt, ## arg); fflush(stdout); } } while (0)

static unsigned int nthreads = 0;
static unsigned int nsecs = 8;
static bool wdone, done, __verbose, randomize, nonblocking;

/*
 * epoll related shared variables.
 */
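
/*
 * epollfd is the single shared instance used by the default combined-queue
 * model; epollfdp holds the chain of nested instances built by
 * nest_epollfd() when --nested is used.
 */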

/* Maximum number of nesting allowed inside epoll sets */
#define EPOLL_MAXNESTS 4

static int epollfd;
static int *epollfdp;
static bool noaffinity;
static unsigned int nested = 0;
static bool et; /* edge-trigger */
static bool oneshot;
static bool multiq; /* use an epoll instance per thread */

/* amount of fds to monitor, per thread */
static unsigned int nfds = 64;

static pthread_mutex_t thread_lock;
static unsigned int threads_starting;
static struct stats throughput_stats;
static pthread_cond_t thread_parent, thread_worker;

struct worker {
	int tid;
	int epollfd; /* for --multiq */
	pthread_t thread;
	unsigned long ops;
	int *fdmap;
};

static const struct option options[] = {
	/* general benchmark options */
	OPT_UINTEGER('t', "threads", &nthreads, "Specify amount of threads"),
	OPT_UINTEGER('r', "runtime", &nsecs, "Specify runtime (in seconds)"),
	OPT_UINTEGER('f', "nfds", &nfds, "Specify amount of file descriptors to monitor for each thread"),
	OPT_BOOLEAN('n', "noaffinity", &noaffinity, "Disables CPU affinity"),
	OPT_BOOLEAN('R', "randomize", &randomize, "Enable random write behaviour (default is linear)"),
	OPT_BOOLEAN('v', "verbose", &__verbose, "Verbose mode"),

	/* epoll specific options */
	OPT_BOOLEAN('m', "multiq", &multiq, "Use multiple epoll instances (one per thread)"),
	OPT_BOOLEAN('B', "nonblocking", &nonblocking, "Nonblocking epoll_wait(2) behaviour"),
	OPT_UINTEGER('N', "nested", &nested, "Nesting level epoll hierarchy (default is 0, no nesting)"),
	OPT_BOOLEAN('S', "oneshot", &oneshot, "Use EPOLLONESHOT semantics"),
	OPT_BOOLEAN('E', "edge", &et, "Use Edge-triggered interface (default is LT)"),

	OPT_END()
};

static const char * const bench_epoll_wait_usage[] = {
	"perf bench epoll wait <options>",
	NULL
};

/*
 * Arrange the N elements of ARRAY in random order.
 * Only effective if N is much smaller than RAND_MAX;
 * if this may not be the case, use a better random
 * number generator. -- Ben Pfaff.
 */
static void shuffle(void *array, size_t n, size_t size)
{
	char *carray = array;
	void *aux;
	size_t i;

	if (n <= 1)
		return;

	aux = calloc(1, size);
	if (!aux)
		err(EXIT_FAILURE, "calloc");

	for (i = 1; i < n; ++i) {
		size_t j = i + rand() / (RAND_MAX / (n - i) + 1);
		j *= size;

		memcpy(aux, &carray[j], size);
		memcpy(&carray[j], &carray[i*size], size);
		memcpy(&carray[i*size], aux, size);
	}

	free(aux);
}

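/*
 * Worker body: wait for one ready event at a time, drain the eventfd,
 * re-add (EPOLLET) or rearm (EPOLLONESHOT) the fd when requested, and
 * account one operation per iteration until 'done' is set.
 */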
static void *workerfn(void *arg)
{
	int fd, ret, r;
	struct worker *w = (struct worker *) arg;
	unsigned long ops = w->ops;
	struct epoll_event ev;
	uint64_t val;
	int to = nonblocking ? 0 : -1;
	int efd = multiq ? w->epollfd : epollfd;

	pthread_mutex_lock(&thread_lock);
	threads_starting--;
	if (!threads_starting)
		pthread_cond_signal(&thread_parent);
	pthread_cond_wait(&thread_worker, &thread_lock);
	pthread_mutex_unlock(&thread_lock);

	do {
		/*
		 * Block indefinitely waiting for the IN event.
		 * In order to stress the epoll_wait(2) syscall,
		 * call it event per event, instead of a larger
		 * batch (max)limit.
		 */
		do {
			ret = epoll_wait(efd, &ev, 1, to);
		} while (ret < 0 && errno == EINTR);
		if (ret < 0)
			err(EXIT_FAILURE, "epoll_wait");

		fd = ev.data.fd;

		do {
			r = read(fd, &val, sizeof(val));
		} while (!done && (r < 0 && errno == EAGAIN));

		if (et) {
			ev.events = EPOLLIN | EPOLLET;
			ret = epoll_ctl(efd, EPOLL_CTL_ADD, fd, &ev);
		}

		if (oneshot) {
			/* rearm the file descriptor with a new event mask */
			ev.events |= EPOLLIN | EPOLLONESHOT;
			ret = epoll_ctl(efd, EPOLL_CTL_MOD, fd, &ev);
		}

		ops++;
	} while (!done);

	if (multiq)
		close(w->epollfd);

	w->ops = ops;
	return NULL;
}

/*
 * Chain the nested epoll instances: epollfdp[i] is registered in
 * epollfdp[i - 1], and epollfdp[0] is added to the worker's (--multiq)
 * or the shared epoll instance.
 */
static void nest_epollfd(struct worker *w)
{
	unsigned int i;
	struct epoll_event ev;
	int efd = multiq ? w->epollfd : epollfd;

	if (nested > EPOLL_MAXNESTS)
		nested = EPOLL_MAXNESTS;

	epollfdp = calloc(nested, sizeof(*epollfdp));
	if (!epollfdp)
		err(EXIT_FAILURE, "calloc");

	for (i = 0; i < nested; i++) {
		epollfdp[i] = epoll_create(1);
		if (epollfdp[i] < 0)
			err(EXIT_FAILURE, "epoll_create");
	}

	ev.events = EPOLLHUP; /* anything */
	ev.data.u64 = i; /* any number */

	for (i = nested - 1; i; i--) {
		if (epoll_ctl(epollfdp[i - 1], EPOLL_CTL_ADD,
			      epollfdp[i], &ev) < 0)
			err(EXIT_FAILURE, "epoll_ctl");
	}

	if (epoll_ctl(efd, EPOLL_CTL_ADD, *epollfdp, &ev) < 0)
		err(EXIT_FAILURE, "epoll_ctl");
}

static void toggle_done(int sig __maybe_unused,
			siginfo_t *info __maybe_unused,
			void *uc __maybe_unused)
{
	/* inform all threads that we're done for the day */
	done = true;
	gettimeofday(&bench__end, NULL);
	timersub(&bench__end, &bench__start, &bench__runtime);
}

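/*
 * Each worker's throughput is its operation count divided by the measured
 * runtime; print_summary() reports the mean across workers along with the
 * relative standard deviation.
 */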
static void print_summary(void)
{
	unsigned long avg = avg_stats(&throughput_stats);
	double stddev = stddev_stats(&throughput_stats);

	printf("\nAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n",
	       avg, rel_stddev_stats(stddev, avg),
	       (int)bench__runtime.tv_sec);
}

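/*
 * Create the worker threads: with --multiq every worker gets its own epoll
 * instance (optionally nested), each worker registers 'nfds' nonblocking
 * eventfds with the selected event mask, and threads are pinned to CPUs
 * round-robin unless --noaffinity is given.
 */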
static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
{
	pthread_attr_t thread_attr, *attrp = NULL;
	cpu_set_t cpuset;
	unsigned int i, j;
	int ret = 0, events = EPOLLIN;

	if (oneshot)
		events |= EPOLLONESHOT;
	if (et)
		events |= EPOLLET;

	printinfo("starting worker/consumer %sthreads%s\n",
		  noaffinity ? "" : "CPU affinity ",
		  nonblocking ? " (nonblocking)" : "");
	if (!noaffinity)
		pthread_attr_init(&thread_attr);

	for (i = 0; i < nthreads; i++) {
		struct worker *w = &worker[i];

		if (multiq) {
			w->epollfd = epoll_create(1);
			if (w->epollfd < 0)
				err(EXIT_FAILURE, "epoll_create");

			if (nested)
				nest_epollfd(w);
		}

		w->tid = i;
		w->fdmap = calloc(nfds, sizeof(int));
		if (!w->fdmap)
			return 1;

		for (j = 0; j < nfds; j++) {
			int efd = multiq ? w->epollfd : epollfd;
			struct epoll_event ev;

			w->fdmap[j] = eventfd(0, EFD_NONBLOCK);
			if (w->fdmap[j] < 0)
				err(EXIT_FAILURE, "eventfd");

			ev.data.fd = w->fdmap[j];
			ev.events = events;

			ret = epoll_ctl(efd, EPOLL_CTL_ADD,
					w->fdmap[j], &ev);
			if (ret < 0)
				err(EXIT_FAILURE, "epoll_ctl");
		}

		if (!noaffinity) {
			CPU_ZERO(&cpuset);
			CPU_SET(cpu->map[i % cpu->nr], &cpuset);

			ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset);
			if (ret)
				err(EXIT_FAILURE, "pthread_attr_setaffinity_np");

			attrp = &thread_attr;
		}

		ret = pthread_create(&w->thread, attrp, workerfn,
				     (void *)(struct worker *) w);
		if (ret)
			err(EXIT_FAILURE, "pthread_create");
	}

	if (!noaffinity)
		pthread_attr_destroy(&thread_attr);

	return ret;
}

/*
 * Writer thread: keep writing to every eventfd in every worker's fdmap so
 * that the readers rarely block; the visiting order is shuffled when
 * --randomize is given, with a short nanosleep() between full passes.
 */
static void *writerfn(void *p)
{
	struct worker *worker = p;
	size_t i, j, iter;
	const uint64_t val = 1;
	ssize_t sz;
	struct timespec ts = { .tv_sec = 0,
			       .tv_nsec = 500 };

	printinfo("starting writer-thread: doing %s writes ...\n",
		  randomize ? "random" : "linear");

	for (iter = 0; !wdone; iter++) {
		if (randomize) {
			shuffle((void *)worker, nthreads, sizeof(*worker));
		}

		for (i = 0; i < nthreads; i++) {
			struct worker *w = &worker[i];

			if (randomize) {
				shuffle((void *)w->fdmap, nfds, sizeof(int));
			}

			for (j = 0; j < nfds; j++) {
				do {
					sz = write(w->fdmap[j], &val, sizeof(val));
				} while (!wdone && (sz < 0 && errno == EAGAIN));
			}
		}

		nanosleep(&ts, NULL);
	}

	printinfo("exiting writer-thread (total full-loops: %zd)\n", iter);
	return NULL;
}

static int cmpworker(const void *p1, const void *p2)
{
	struct worker *w1 = (struct worker *) p1;
	struct worker *w2 = (struct worker *) p2;

	/* sort back into ascending tid order */
	return w1->tid - w2->tid;
}

/*
 * Benchmark entry point: parse options, install the SIGINT handler, set up
 * the epoll instance(s) and RLIMIT_NOFILE, start the workers and the writer,
 * run for the requested number of seconds and report per-thread throughput.
 */
int bench_epoll_wait(int argc, const char **argv)
{
	int ret = 0;
	struct sigaction act;
	unsigned int i;
	struct worker *worker = NULL;
	struct perf_cpu_map *cpu;
	pthread_t wthread;
	struct rlimit rl, prevrl;

	argc = parse_options(argc, argv, options, bench_epoll_wait_usage, 0);
	if (argc) {
		usage_with_options(bench_epoll_wait_usage, options);
		exit(EXIT_FAILURE);
	}

	memset(&act, 0, sizeof(act));
	sigfillset(&act.sa_mask);
	act.sa_sigaction = toggle_done;
	sigaction(SIGINT, &act, NULL);

	cpu = perf_cpu_map__new(NULL);
	if (!cpu)
		goto errmem;

	/* a single, main epoll instance */
	if (!multiq) {
		epollfd = epoll_create(1);
		if (epollfd < 0)
			err(EXIT_FAILURE, "epoll_create");

		/*
		 * Deal with nested epolls, if any.
		 */
		if (nested)
			nest_epollfd(NULL);
	}

	printinfo("Using %s queue model\n", multiq ? "multi" : "single");
	printinfo("Nesting level(s): %d\n", nested);

	/* default to the number of CPUs and leave one for the writer pthread */
	if (!nthreads)
		nthreads = cpu->nr - 1;

	worker = calloc(nthreads, sizeof(*worker));
	if (!worker) {
		goto errmem;
	}

	if (getrlimit(RLIMIT_NOFILE, &prevrl))
		err(EXIT_FAILURE, "getrlimit");
	rl.rlim_cur = rl.rlim_max = nfds * nthreads * 2 + 50;
	printinfo("Setting RLIMIT_NOFILE rlimit from %" PRIu64 " to: %" PRIu64 "\n",
		  (uint64_t)prevrl.rlim_max, (uint64_t)rl.rlim_max);
	if (setrlimit(RLIMIT_NOFILE, &rl) < 0)
		err(EXIT_FAILURE, "setrlimit");

	printf("Run summary [PID %d]: %d threads monitoring%s on "
	       "%d file-descriptors for %d secs.\n\n",
	       getpid(), nthreads, oneshot ? " (EPOLLONESHOT semantics)" : "", nfds, nsecs);

	init_stats(&throughput_stats);
	pthread_mutex_init(&thread_lock, NULL);
	pthread_cond_init(&thread_parent, NULL);
	pthread_cond_init(&thread_worker, NULL);

	threads_starting = nthreads;

	gettimeofday(&bench__start, NULL);

	do_threads(worker, cpu);

	pthread_mutex_lock(&thread_lock);
	while (threads_starting)
		pthread_cond_wait(&thread_parent, &thread_lock);
	pthread_cond_broadcast(&thread_worker);
	pthread_mutex_unlock(&thread_lock);

	/*
	 * At this point the workers should be blocked waiting for read events
	 * to become ready. Launch the writer which will constantly be writing
	 * to each thread's fdmap.
	 */
	ret = pthread_create(&wthread, NULL, writerfn,
			     (void *)(struct worker *) worker);
	if (ret)
		err(EXIT_FAILURE, "pthread_create");

	sleep(nsecs);
	toggle_done(0, NULL, NULL);
	printinfo("main thread: toggling done\n");

	sleep(1); /* meh */
	wdone = true;
	ret = pthread_join(wthread, NULL);
	if (ret)
		err(EXIT_FAILURE, "pthread_join");

	/* cleanup & report results */
	pthread_cond_destroy(&thread_parent);
	pthread_cond_destroy(&thread_worker);
	pthread_mutex_destroy(&thread_lock);

	/* sort the array back before reporting */
	if (randomize)
		qsort(worker, nthreads, sizeof(struct worker), cmpworker);

	for (i = 0; i < nthreads; i++) {
		unsigned long t = bench__runtime.tv_sec > 0 ?
			worker[i].ops / bench__runtime.tv_sec : 0;

		update_stats(&throughput_stats, t);

		if (nfds == 1)
			printf("[thread %2d] fdmap: %p [ %04ld ops/sec ]\n",
			       worker[i].tid, &worker[i].fdmap[0], t);
		else
			printf("[thread %2d] fdmap: %p ... %p [ %04ld ops/sec ]\n",
			       worker[i].tid, &worker[i].fdmap[0],
			       &worker[i].fdmap[nfds-1], t);
	}

	print_summary();

	close(epollfd);
	return ret;
errmem:
	err(EXIT_FAILURE, "calloc");
}
#endif // HAVE_EVENTFD_SUPPORT