// SPDX-License-Identifier: GPL-2.0-only
/*
 * Read/write thread of a guest agent for virtio-trace
 *
 * Copyright (C) 2012 Hitachi, Ltd.
 * Created by Yoshihiro Yunomae <yoshihiro.yunomae.ez@hitachi.com>
 *            Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
 */

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/syscall.h>
#include "trace-agent.h"

#define READ_WAIT_USEC	100000

void *rw_thread_info_new(void)
{
	struct rw_thread_info *rw_ti;

	rw_ti = zalloc(sizeof(struct rw_thread_info));
	if (rw_ti == NULL) {
		pr_err("rw_thread_info zalloc error\n");
		exit(EXIT_FAILURE);
	}

	rw_ti->cpu_num = -1;
	rw_ti->in_fd = -1;
	rw_ti->out_fd = -1;
	rw_ti->read_pipe = -1;
	rw_ti->write_pipe = -1;
	rw_ti->pipe_size = PIPE_INIT;

	return rw_ti;
}

void *rw_thread_init(int cpu, const char *in_path, const char *out_path,
		     bool stdout_flag, unsigned long pipe_size,
		     struct rw_thread_info *rw_ti)
{
	int data_pipe[2];

	rw_ti->cpu_num = cpu;

	/* set read(input) fd */
	rw_ti->in_fd = open(in_path, O_RDONLY);
	if (rw_ti->in_fd == -1) {
		pr_err("Could not open in_fd (CPU:%d)\n", cpu);
		goto error;
	}

	/* set write(output) fd */
	if (!stdout_flag) {
		/* virtio-serial output mode */
		rw_ti->out_fd = open(out_path, O_WRONLY);
		if (rw_ti->out_fd == -1) {
			pr_err("Could not open out_fd (CPU:%d)\n", cpu);
			goto error;
		}
	} else
		/* stdout mode */
		rw_ti->out_fd = STDOUT_FILENO;

	if (pipe2(data_pipe, O_NONBLOCK) < 0) {
		pr_err("Could not create pipe in rw-thread(%d)\n", cpu);
		goto error;
	}

	/*
	 * The default pipe size is 64kB (see fs/pipe.c). Enlarge it so that
	 * trace data can be read and written in bigger chunks.
	 */
	if (fcntl(*data_pipe, F_SETPIPE_SZ, pipe_size) < 0) {
		pr_err("Could not change pipe size in rw-thread(%d)\n", cpu);
		goto error;
	}

	/*
	 * read_pipe is the end trace data is spliced into (the pipe's write
	 * end); write_pipe is the end data is spliced out of toward out_fd
	 * (the pipe's read end).
	 */
	rw_ti->read_pipe = data_pipe[1];
	rw_ti->write_pipe = data_pipe[0];
	rw_ti->pipe_size = pipe_size;

	return NULL;

error:
	exit(EXIT_FAILURE);
}
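
/*
 * Illustrative sketch only, compiled out by default: F_SETPIPE_SZ can fail
 * with EPERM for an unprivileged process when the requested size exceeds
 * /proc/sys/fs/pipe-max-size, so a caller could clamp its request before
 * calling rw_thread_init() along these lines. The RW_THREAD_EXAMPLE guard,
 * the helper name and the clamping policy are assumptions of this sketch,
 * not part of the original agent.
 */
#ifdef RW_THREAD_EXAMPLE
static unsigned long example_clamp_pipe_size(unsigned long requested)
{
	FILE *fp = fopen("/proc/sys/fs/pipe-max-size", "r");
	unsigned long max_size;

	if (fp == NULL)
		return requested;	/* keep the caller's value on error */

	if (fscanf(fp, "%lu", &max_size) == 1 && requested > max_size)
		requested = max_size;

	fclose(fp);
	return requested;
}
#endif /* RW_THREAD_EXAMPLE */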

/* Bind a thread to a cpu */
static void bind_cpu(int cpu_num)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(cpu_num, &mask);

	/* bind my thread to cpu_num by assigning zero to the first argument */
	if (sched_setaffinity(0, sizeof(mask), &mask) == -1)
		pr_err("Could not set CPU#%d affinity\n", (int)cpu_num);
}
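
/*
 * Per-CPU relay loop: trace data flows from the CPU's trace_pipe_raw
 * (in_fd) into an intermediate pipe and from there to the virtio-serial
 * port or stdout (out_fd). Both hops use splice(2), so the data is moved
 * between file descriptors without passing through a user-space buffer.
 */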
static void *rw_thread_main(void *thread_info)
{
	ssize_t rlen, wlen;
	ssize_t ret;
	struct rw_thread_info *ts = (struct rw_thread_info *)thread_info;

	bind_cpu(ts->cpu_num);

	while (1) {
		/* Wait until the host OS orders trace data to be read */
		if (!global_run_operation) {
			pthread_mutex_lock(&mutex_notify);
			pthread_cond_wait(&cond_wakeup, &mutex_notify);
			pthread_mutex_unlock(&mutex_notify);
		}

		if (global_sig_receive)
			break;

		/*
		 * Each thread reads the trace_pipe_raw of the CPU it is
		 * bound to, so the threads never contend for the same file.
		 */
		rlen = splice(ts->in_fd, NULL, ts->read_pipe, NULL,
				ts->pipe_size, SPLICE_F_MOVE | SPLICE_F_MORE);

		if (rlen < 0) {
			pr_err("Splice_read in rw-thread(%d)\n", ts->cpu_num);
			goto error;
		} else if (rlen == 0) {
			/*
			 * splice() returns 0 when no trace data exist or when
			 * the available data does not yet fill a page. Sleep
			 * for a while and retry once the ring buffer has
			 * filled up.
			 */
			usleep(READ_WAIT_USEC);
			pr_debug("Read retry(cpu:%d)\n", ts->cpu_num);
			continue;
		}

		wlen = 0;

		do {
			ret = splice(ts->write_pipe, NULL, ts->out_fd, NULL,
					rlen - wlen,
					SPLICE_F_MOVE | SPLICE_F_MORE);

			if (ret < 0) {
				pr_err("Splice_write in rw-thread(%d)\n",
					ts->cpu_num);
				goto error;
			} else if (ret == 0)
				/*
				 * If the host reader cannot keep up with the
				 * trace data, the guest stalls because the
				 * QEMU character device does not support
				 * non-blocking mode, so the writer may block
				 * here. This sleep can be removed once
				 * non-blocking mode is supported.
				 */
				sleep(1);
			wlen += ret;
		} while (wlen < rlen);
	}

	return NULL;

error:
	exit(EXIT_FAILURE);
}


pthread_t rw_thread_run(struct rw_thread_info *rw_ti)
{
	int ret;
	pthread_t rw_thread_per_cpu;

	ret = pthread_create(&rw_thread_per_cpu, NULL, rw_thread_main, rw_ti);
	if (ret != 0) {
		pr_err("Could not create a rw thread(%d)\n", rw_ti->cpu_num);
		exit(EXIT_FAILURE);
	}

	return rw_thread_per_cpu;
}
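
/*
 * Illustrative sketch only, compiled out by default: one way a caller such
 * as the agent's main program might wire the helpers above together for a
 * single CPU. The RW_THREAD_EXAMPLE guard, the helper name and the debugfs
 * and virtio-serial paths below are assumptions of this sketch, not values
 * taken from this file.
 */
#ifdef RW_THREAD_EXAMPLE
static pthread_t example_start_rw_thread(int cpu, unsigned long pipe_size)
{
	struct rw_thread_info *rw_ti = rw_thread_info_new();
	char in_path[128], out_path[128];

	/* per-CPU trace buffer exported by ftrace (assumed debugfs mount) */
	snprintf(in_path, sizeof(in_path),
		 "/sys/kernel/debug/tracing/per_cpu/cpu%d/trace_pipe_raw", cpu);
	/* matching virtio-serial port on the guest side (assumed naming) */
	snprintf(out_path, sizeof(out_path),
		 "/dev/virtio-ports/trace-path-cpu%d", cpu);

	rw_thread_init(cpu, in_path, out_path, false, pipe_size, rw_ti);

	return rw_thread_run(rw_ti);
}
#endif /* RW_THREAD_EXAMPLE */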