xref: /kernel/linux/linux-6.6/fs/ceph/metric.c (revision 62306a36)
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/ceph/ceph_debug.h>

#include <linux/types.h>
#include <linux/percpu_counter.h>
#include <linux/math64.h>

#include "metric.h"
#include "mds_client.h"

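/* Convert a ktime_t value into an on-wire struct ceph_timespec. */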
static void ktime_to_ceph_timespec(struct ceph_timespec *ts, ktime_t val)
{
	struct timespec64 t = ktime_to_timespec64(val);
	ceph_encode_timespec64(ts, &t);
}

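/*
 * Build a single CEPH_MSG_CLIENT_METRICS message carrying all of the
 * client's metric records and send it to the given MDS session.
 *
 * Returns false if the MDS rank is not active yet or the message cannot
 * be allocated, true once the message has been queued on the session's
 * connection.
 */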
static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
				   struct ceph_mds_session *s)
{
	struct ceph_metric_head *head;
	struct ceph_metric_cap *cap;
	struct ceph_metric_read_latency *read;
	struct ceph_metric_write_latency *write;
	struct ceph_metric_metadata_latency *meta;
	struct ceph_metric_dlease *dlease;
	struct ceph_opened_files *files;
	struct ceph_pinned_icaps *icaps;
	struct ceph_opened_inodes *inodes;
	struct ceph_read_io_size *rsize;
	struct ceph_write_io_size *wsize;
	struct ceph_client_metric *m = &mdsc->metric;
	u64 nr_caps = atomic64_read(&m->total_caps);
	u32 header_len = sizeof(struct ceph_metric_header);
	struct ceph_msg *msg;
	s64 sum;
	s32 items = 0;
	s32 len;

	/* Do not send the metrics until the MDS rank is ready */
	mutex_lock(&mdsc->mutex);
	if (ceph_mdsmap_get_state(mdsc->mdsmap, s->s_mds) != CEPH_MDS_STATE_ACTIVE) {
		mutex_unlock(&mdsc->mutex);
		return false;
	}
	mutex_unlock(&mdsc->mutex);

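	/*
	 * All records are packed back-to-back into a single flat buffer:
	 * the ceph_metric_head is followed by the cap, read/write/metadata
	 * latency, dentry lease, opened files, pinned icaps, opened inodes
	 * and read/write io size records, each starting with its own
	 * ceph_metric_header.
	 */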
	len = sizeof(*head) + sizeof(*cap) + sizeof(*read) + sizeof(*write)
	      + sizeof(*meta) + sizeof(*dlease) + sizeof(*files)
	      + sizeof(*icaps) + sizeof(*inodes) + sizeof(*rsize)
	      + sizeof(*wsize);

	msg = ceph_msg_new(CEPH_MSG_CLIENT_METRICS, len, GFP_NOFS, true);
	if (!msg) {
		pr_err("send metrics to mds%d, failed to allocate message\n",
		       s->s_mds);
		return false;
	}

	head = msg->front.iov_base;

	/* encode the cap metric */
	cap = (struct ceph_metric_cap *)(head + 1);
	cap->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_CAP_INFO);
	cap->header.ver = 1;
	cap->header.compat = 1;
	cap->header.data_len = cpu_to_le32(sizeof(*cap) - header_len);
	cap->hit = cpu_to_le64(percpu_counter_sum(&m->i_caps_hit));
	cap->mis = cpu_to_le64(percpu_counter_sum(&m->i_caps_mis));
	cap->total = cpu_to_le64(nr_caps);
	items++;

	/* encode the read latency metric */
	read = (struct ceph_metric_read_latency *)(cap + 1);
	read->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_READ_LATENCY);
	read->header.ver = 2;
	read->header.compat = 1;
	read->header.data_len = cpu_to_le32(sizeof(*read) - header_len);
	sum = m->metric[METRIC_READ].latency_sum;
	ktime_to_ceph_timespec(&read->lat, sum);
	ktime_to_ceph_timespec(&read->avg, m->metric[METRIC_READ].latency_avg);
	read->sq_sum = cpu_to_le64(m->metric[METRIC_READ].latency_sq_sum);
	read->count = cpu_to_le64(m->metric[METRIC_READ].total);
	items++;

	/* encode the write latency metric */
	write = (struct ceph_metric_write_latency *)(read + 1);
	write->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_WRITE_LATENCY);
	write->header.ver = 2;
	write->header.compat = 1;
	write->header.data_len = cpu_to_le32(sizeof(*write) - header_len);
	sum = m->metric[METRIC_WRITE].latency_sum;
	ktime_to_ceph_timespec(&write->lat, sum);
	ktime_to_ceph_timespec(&write->avg, m->metric[METRIC_WRITE].latency_avg);
	write->sq_sum = cpu_to_le64(m->metric[METRIC_WRITE].latency_sq_sum);
	write->count = cpu_to_le64(m->metric[METRIC_WRITE].total);
	items++;

	/* encode the metadata latency metric */
	meta = (struct ceph_metric_metadata_latency *)(write + 1);
	meta->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_METADATA_LATENCY);
	meta->header.ver = 2;
	meta->header.compat = 1;
	meta->header.data_len = cpu_to_le32(sizeof(*meta) - header_len);
	sum = m->metric[METRIC_METADATA].latency_sum;
	ktime_to_ceph_timespec(&meta->lat, sum);
	ktime_to_ceph_timespec(&meta->avg, m->metric[METRIC_METADATA].latency_avg);
	meta->sq_sum = cpu_to_le64(m->metric[METRIC_METADATA].latency_sq_sum);
	meta->count = cpu_to_le64(m->metric[METRIC_METADATA].total);
	items++;

	/* encode the dentry lease metric */
	dlease = (struct ceph_metric_dlease *)(meta + 1);
	dlease->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_DENTRY_LEASE);
	dlease->header.ver = 1;
	dlease->header.compat = 1;
	dlease->header.data_len = cpu_to_le32(sizeof(*dlease) - header_len);
	dlease->hit = cpu_to_le64(percpu_counter_sum(&m->d_lease_hit));
	dlease->mis = cpu_to_le64(percpu_counter_sum(&m->d_lease_mis));
	dlease->total = cpu_to_le64(atomic64_read(&m->total_dentries));
	items++;

	sum = percpu_counter_sum(&m->total_inodes);

	/* encode the opened files metric */
	files = (struct ceph_opened_files *)(dlease + 1);
	files->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_OPENED_FILES);
	files->header.ver = 1;
	files->header.compat = 1;
	files->header.data_len = cpu_to_le32(sizeof(*files) - header_len);
	files->opened_files = cpu_to_le64(atomic64_read(&m->opened_files));
	files->total = cpu_to_le64(sum);
	items++;

	/* encode the pinned icaps metric */
	icaps = (struct ceph_pinned_icaps *)(files + 1);
	icaps->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_PINNED_ICAPS);
	icaps->header.ver = 1;
	icaps->header.compat = 1;
	icaps->header.data_len = cpu_to_le32(sizeof(*icaps) - header_len);
	icaps->pinned_icaps = cpu_to_le64(nr_caps);
	icaps->total = cpu_to_le64(sum);
	items++;

	/* encode the opened inodes metric */
	inodes = (struct ceph_opened_inodes *)(icaps + 1);
	inodes->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_OPENED_INODES);
	inodes->header.ver = 1;
	inodes->header.compat = 1;
	inodes->header.data_len = cpu_to_le32(sizeof(*inodes) - header_len);
	inodes->opened_inodes = cpu_to_le64(percpu_counter_sum(&m->opened_inodes));
	inodes->total = cpu_to_le64(sum);
	items++;

	/* encode the read io size metric */
	rsize = (struct ceph_read_io_size *)(inodes + 1);
	rsize->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_READ_IO_SIZES);
	rsize->header.ver = 1;
	rsize->header.compat = 1;
	rsize->header.data_len = cpu_to_le32(sizeof(*rsize) - header_len);
	rsize->total_ops = cpu_to_le64(m->metric[METRIC_READ].total);
	rsize->total_size = cpu_to_le64(m->metric[METRIC_READ].size_sum);
	items++;

	/* encode the write io size metric */
	wsize = (struct ceph_write_io_size *)(rsize + 1);
	wsize->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_WRITE_IO_SIZES);
	wsize->header.ver = 1;
	wsize->header.compat = 1;
	wsize->header.data_len = cpu_to_le32(sizeof(*wsize) - header_len);
	wsize->total_ops = cpu_to_le64(m->metric[METRIC_WRITE].total);
	wsize->total_size = cpu_to_le64(m->metric[METRIC_WRITE].size_sum);
	items++;

	put_unaligned_le32(items, &head->num);
	msg->front.iov_len = len;
	msg->hdr.version = cpu_to_le16(1);
	msg->hdr.compat_version = cpu_to_le16(1);
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
	ceph_con_send(&s->s_con, msg);

	return true;
}

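/*
 * Pick the session used for sending metrics: walk the known sessions and
 * take a reference on the first one that is in a usable state and whose
 * MDS advertises CEPHFS_FEATURE_METRIC_COLLECT, storing it in
 * mdsc->metric.session.
 */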
static void metric_get_session(struct ceph_mds_client *mdsc)
{
	struct ceph_mds_session *s;
	int i;

	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
		s = __ceph_lookup_mds_session(mdsc, i);
		if (!s)
			continue;

		/*
		 * Skip the session if the MDS doesn't support metric
		 * collection; otherwise the MDS will close the session's
		 * socket connection directly when it gets this message.
		 */
		if (check_session_state(s) &&
		    test_bit(CEPHFS_FEATURE_METRIC_COLLECT, &s->s_features)) {
			mdsc->metric.session = s;
			break;
		}

		ceph_put_mds_session(s);
	}
	mutex_unlock(&mdsc->mutex);
}

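/*
 * Delayed worker that sends the metrics: drop the cached session if it is
 * no longer usable, look up a new one if needed, send the metrics to it
 * and reschedule. Nothing is sent (and the work is not rearmed) when the
 * client is stopping, sending metrics is disabled, or no suitable session
 * can be found.
 */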
static void metric_delayed_work(struct work_struct *work)
{
	struct ceph_client_metric *m =
		container_of(work, struct ceph_client_metric, delayed_work.work);
	struct ceph_mds_client *mdsc =
		container_of(m, struct ceph_mds_client, metric);

	if (mdsc->stopping || disable_send_metrics)
		return;

	if (!m->session || !check_session_state(m->session)) {
		if (m->session) {
			ceph_put_mds_session(m->session);
			m->session = NULL;
		}
		metric_get_session(mdsc);
	}
	if (m->session) {
		ceph_mdsc_send_metrics(mdsc, m->session);
		metric_schedule_delayed(m);
	}
}

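/*
 * Initialize the per-client metric state: the dentry/caps/inode counters,
 * the per-I/O-type latency and size statistics, and the delayed work that
 * periodically sends the metrics to the MDS. On failure, any counters
 * that were already set up are destroyed again and the error is returned.
 */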
int ceph_metric_init(struct ceph_client_metric *m)
{
	struct ceph_metric *metric;
	int ret, i;

	if (!m)
		return -EINVAL;

	atomic64_set(&m->total_dentries, 0);
	ret = percpu_counter_init(&m->d_lease_hit, 0, GFP_KERNEL);
	if (ret)
		return ret;

	ret = percpu_counter_init(&m->d_lease_mis, 0, GFP_KERNEL);
	if (ret)
		goto err_d_lease_mis;

	atomic64_set(&m->total_caps, 0);
	ret = percpu_counter_init(&m->i_caps_hit, 0, GFP_KERNEL);
	if (ret)
		goto err_i_caps_hit;

	ret = percpu_counter_init(&m->i_caps_mis, 0, GFP_KERNEL);
	if (ret)
		goto err_i_caps_mis;

	for (i = 0; i < METRIC_MAX; i++) {
		metric = &m->metric[i];
		spin_lock_init(&metric->lock);
		metric->size_sum = 0;
		metric->size_min = U64_MAX;
		metric->size_max = 0;
		metric->total = 0;
		metric->latency_sum = 0;
		metric->latency_avg = 0;
		metric->latency_sq_sum = 0;
		metric->latency_min = KTIME_MAX;
		metric->latency_max = 0;
	}

	atomic64_set(&m->opened_files, 0);
	ret = percpu_counter_init(&m->opened_inodes, 0, GFP_KERNEL);
	if (ret)
		goto err_opened_inodes;
	ret = percpu_counter_init(&m->total_inodes, 0, GFP_KERNEL);
	if (ret)
		goto err_total_inodes;

	m->session = NULL;
	INIT_DELAYED_WORK(&m->delayed_work, metric_delayed_work);

	return 0;

err_total_inodes:
	percpu_counter_destroy(&m->opened_inodes);
err_opened_inodes:
	percpu_counter_destroy(&m->i_caps_mis);
err_i_caps_mis:
	percpu_counter_destroy(&m->i_caps_hit);
err_i_caps_hit:
	percpu_counter_destroy(&m->d_lease_mis);
err_d_lease_mis:
	percpu_counter_destroy(&m->d_lease_hit);

	return ret;
}

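/*
 * Undo ceph_metric_init(): cancel the delayed work, destroy the percpu
 * counters and drop the cached MDS session reference.
 */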
void ceph_metric_destroy(struct ceph_client_metric *m)
{
	if (!m)
		return;

	cancel_delayed_work_sync(&m->delayed_work);

	percpu_counter_destroy(&m->total_inodes);
	percpu_counter_destroy(&m->opened_inodes);
	percpu_counter_destroy(&m->i_caps_mis);
	percpu_counter_destroy(&m->i_caps_hit);
	percpu_counter_destroy(&m->d_lease_mis);
	percpu_counter_destroy(&m->d_lease_hit);

	ceph_put_mds_session(m->session);
}

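/* Fold a newly observed value into the running minimum and maximum. */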
#define METRIC_UPDATE_MIN_MAX(min, max, new)	\
{						\
	if (unlikely(new < min))		\
		min = new;			\
	if (unlikely(new > max))		\
		max = new;			\
}

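/*
 * Welford's online algorithm: maintain the running mean and the sum of
 * squared deviations from it in a single pass, without storing samples:
 *
 *	avg_n    = avg_(n-1) + (lat - avg_(n-1)) / n
 *	sq_sum_n = sq_sum_(n-1) + (lat - avg_(n-1)) * (lat - avg_n)
 *
 * sq_sum is the sum of squared differences from the current mean, so the
 * sample standard deviation can later be recovered as
 * sqrt(sq_sum / (n - 1)).
 */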
static inline void __update_mean_and_stdev(ktime_t total, ktime_t *lavg,
					   ktime_t *sq_sump, ktime_t lat)
{
	ktime_t avg;

	if (unlikely(total == 1)) {
		*lavg = lat;
	} else {
		/* the sq is (lat - old_avg) * (lat - new_avg) */
		avg = *lavg + div64_s64(lat - *lavg, total);
		*sq_sump += (lat - *lavg) * (lat - avg);
		*lavg = avg;
	}
}

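/*
 * Account one completed request: bump the request count, the size and
 * latency accumulators and their min/max values, and fold the latency
 * into the running mean and squared-deviation sum. Failed requests are
 * ignored, except for -ENOENT and -ETIMEDOUT, which are still counted.
 */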
void ceph_update_metrics(struct ceph_metric *m,
			 ktime_t r_start, ktime_t r_end,
			 unsigned int size, int rc)
{
	ktime_t lat = ktime_sub(r_end, r_start);
	ktime_t total;

	if (unlikely(rc < 0 && rc != -ENOENT && rc != -ETIMEDOUT))
		return;

	spin_lock(&m->lock);
	total = ++m->total;
	m->size_sum += size;
	METRIC_UPDATE_MIN_MAX(m->size_min, m->size_max, size);
	m->latency_sum += lat;
	METRIC_UPDATE_MIN_MAX(m->latency_min, m->latency_max, lat);
	__update_mean_and_stdev(total, &m->latency_avg, &m->latency_sq_sum,
				lat);
	spin_unlock(&m->lock);
}
362