// SPDX-License-Identifier: GPL-2.0
/*
 * fill_buf benchmark
 *
 * Copyright (C) 2018 Intel Corporation
 *
 * Authors:
 *    Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>,
 *    Fenghua Yu <fenghua.yu@intel.com>
 */
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <inttypes.h>
#include <string.h>

#include "resctrl.h"
#define CL_SIZE			(64)
#define PAGE_SIZE		(4 * 1024)	/* assumes 4 KiB base pages */
#define MB			(1024 * 1024)

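/*
 * Store barrier: on x86, sfence makes all preceding stores globally
 * visible before any later store; compiled out on other architectures.
 */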
static void sb(void)
{
#if defined(__i386__) || defined(__x86_64__)
	asm volatile("sfence\n\t"
		     : : : "memory");
#endif
}

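/* Evict the cache line containing @p from all levels of the cache hierarchy. */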
static void cl_flush(void *p)
{
#if defined(__i386__) || defined(__x86_64__)
	asm volatile("clflush (%0)\n\t"
		     : : "r"(p) : "memory");
#endif
}

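/* Flush the whole buffer from the CPU caches, one cache line at a time. */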
static void mem_flush(unsigned char *buf, size_t buf_size)
{
	unsigned char *cp = buf;
	size_t i;

	buf_size = buf_size / CL_SIZE; /* mem size in cache lines */

	for (i = 0; i < buf_size; i++)
		cl_flush(&cp[i * CL_SIZE]);

	sb();
}

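/*
 * Allocate a page-aligned buffer of @buf_size bytes and write a random
 * value into the first word of every cache line, so that each line is
 * faulted in and dirtied before the benchmark runs.
 */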
static void *malloc_and_init_memory(size_t buf_size)
{
	void *p = NULL;
	uint64_t *p64;
	size_t s64;
	int ret;

	ret = posix_memalign(&p, PAGE_SIZE, buf_size);
	if (ret) /* posix_memalign() returns a positive errno value, not -1 */
		return NULL;

	p64 = (uint64_t *)p;
	s64 = buf_size / sizeof(uint64_t);

	/* Stop while a full cache line remains, so s64 cannot wrap around. */
	while (s64 >= CL_SIZE / sizeof(uint64_t)) {
		*p64 = (uint64_t)rand();
		p64 += (CL_SIZE / sizeof(uint64_t));
		s64 -= (CL_SIZE / sizeof(uint64_t));
	}

	return p;
}

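/*
 * Read one byte every half cache line across @buf; the returned sum is
 * consumed by the caller so the loads cannot be optimized away.
 */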
static int fill_one_span_read(unsigned char *buf, size_t buf_size)
{
	unsigned char *end_ptr = buf + buf_size;
	unsigned char sum, *p;

	sum = 0;
	p = buf;
	while (p < end_ptr) {
		sum += *p;
		p += (CL_SIZE / 2);
	}

	return sum;
}

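/* Write one byte every half cache line across @buf, dirtying each line. */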
static void fill_one_span_write(unsigned char *buf, size_t buf_size)
{
	unsigned char *end_ptr = buf + buf_size;
	unsigned char *p;

	p = buf;
	while (p < end_ptr) {
		*p = '1';
		p += (CL_SIZE / 2);
	}
}

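/*
 * Read benchmark: a single pass over @buf when @once is true, otherwise
 * an endless loop of passes.
 */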
static int fill_cache_read(unsigned char *buf, size_t buf_size, bool once)
{
	int ret = 0;
	FILE *fp;

	while (1) {
		ret = fill_one_span_read(buf, buf_size);
		if (once)
			break;
	}

	/* Consume read result so that reading memory is not optimized out. */
	fp = fopen("/dev/null", "w");
	if (!fp) {
		perror("Failed to open /dev/null");
		return -1;
	}
	fprintf(fp, "Sum: %d ", ret);
	fclose(fp);

	return 0;
}

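/* Write-benchmark counterpart of fill_cache_read(). */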
static int fill_cache_write(unsigned char *buf, size_t buf_size, bool once)
{
	while (1) {
		fill_one_span_write(buf, buf_size);
		if (once)
			break;
	}

	return 0;
}

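/*
 * Allocate and initialize a @buf_size byte buffer, optionally flush it
 * from the caches first (@memflush), then run the read (@op == 0) or
 * write (@op != 0) benchmark over it.
 */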
static int fill_cache(size_t buf_size, int memflush, int op, bool once)
{
	unsigned char *buf;
	int ret;

	buf = malloc_and_init_memory(buf_size);
	if (!buf)
		return -1;

	/* Flush the memory before using to avoid "cache hot pages" effect */
	if (memflush)
		mem_flush(buf, buf_size);

	if (op == 0)
		ret = fill_cache_read(buf, buf_size, once);
	else
		ret = fill_cache_write(buf, buf_size, once);

	free(buf);

	if (ret) {
		printf("\n Error in fill cache read/write...\n");
		return -1;
	}

	return 0;
}

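/*
 * Entry point: exercise a @span byte buffer with reads or writes. As an
 * illustration (arbitrary values, not taken from the test harness),
 * run_fill_buf(256 * MB, 1, 0, false) would flush and then endlessly
 * read a 256 MiB buffer.
 */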
int run_fill_buf(size_t span, int memflush, int op, bool once)
{
	int ret;

	ret = fill_cache(span, memflush, op, once);
	if (ret) {
		printf("\n Error in fill cache\n");
		return -1;
	}

	return 0;
}