// SPDX-License-Identifier: GPL-2.0
/*
 * fs/hmdfs/comm/device_node.c
 *
 * Copyright (c) 2020-2021 Huawei Device Co., Ltd.
 */

#include "device_node.h"

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/backing-dev.h>

#include "client_writeback.h"
#include "server_writeback.h"
#include "connection.h"
#include "hmdfs_client.h"
#include "socket_adapter.h"
#include "authority/authentication.h"

DEFINE_MUTEX(hmdfs_sysfs_mutex);
static struct kset *hmdfs_kset;

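/*
 * CMD_UPDATE_SOCKET handler: userspace hands over an established socket
 * fd for a peer identified by cid.  Look the peer up, attach the fd as a
 * tcp connection in the requested handshake state, and cache the
 * caller's credentials as this superblock's system credential the first
 * time that succeeds.
 */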
static void ctrl_cmd_update_socket_handler(const char *buf, size_t len,
					   struct hmdfs_sb_info *sbi)
{
	struct update_socket_param cmd;
	struct hmdfs_peer *node = NULL;
	struct connection *conn = NULL;

	if (unlikely(!buf || len != sizeof(cmd))) {
		hmdfs_err("len/buf error");
		goto out;
	}
	memcpy(&cmd, buf, sizeof(cmd));
	if (cmd.status != CONNECT_STAT_WAIT_REQUEST &&
	    cmd.status != CONNECT_STAT_WAIT_RESPONSE) {
		hmdfs_err("invalid status");
		goto out;
	}

	node = hmdfs_get_peer(sbi, cmd.cid, cmd.devsl);
	if (unlikely(!node)) {
		hmdfs_err("failed to update ctrl node: cannot get peer");
		goto out;
	}

	conn = hmdfs_get_conn_tcp(node, cmd.newfd, cmd.masterkey, cmd.status);
	if (unlikely(!conn)) {
		hmdfs_err("failed to update ctrl node: cannot get conn");
	} else if (!sbi->system_cred) {
		const struct cred *system_cred = get_cred(current_cred());

		if (cmpxchg_relaxed(&sbi->system_cred, NULL, system_cred))
			put_cred(system_cred);
		else
			hmdfs_check_cred(system_cred);
	}

	if (conn)
		connection_put(conn);
out:
	if (node)
		peer_put(node);
}

static void ctrl_cmd_update_devsl_handler(const char *buf, size_t len,
				      struct hmdfs_sb_info *sbi)
{
	struct update_devsl_param cmd;
	struct hmdfs_peer *node = NULL;

	if (unlikely(!buf || len != sizeof(cmd))) {
		hmdfs_err("Received an invalid userbuf");
		return;
	}
	memcpy(&cmd, buf, sizeof(cmd));

	node = hmdfs_lookup_from_cid(sbi, cmd.cid);
	if (unlikely(!node)) {
		hmdfs_err("failed to update devsl: cannot get peer");
		return;
	}
	hmdfs_info("Found peer: device_id = %llu", node->device_id);
	node->devsl = cmd.devsl;
	peer_put(node);
}

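/*
 * Disconnect a peer with the offline-processing window explicitly
 * marked: the disconnect is bracketed by hmdfs_start_process_offline()
 * and hmdfs_stop_process_offline().
 */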
static inline void hmdfs_disconnect_node_marked(struct hmdfs_peer *conn)
{
	hmdfs_start_process_offline(conn);
	hmdfs_disconnect_node(conn);
	hmdfs_stop_process_offline(conn);
}

static void ctrl_cmd_off_line_handler(const char *buf, size_t len,
				      struct hmdfs_sb_info *sbi)
{
	struct offline_param cmd;
	struct hmdfs_peer *node = NULL;

	if (unlikely(!buf || len != sizeof(cmd))) {
		hmdfs_err("Received an invalid userbuf");
		return;
	}
	memcpy(&cmd, buf, sizeof(cmd));
	node = hmdfs_lookup_from_cid(sbi, cmd.remote_cid);
	if (unlikely(!node)) {
		hmdfs_err("Cannot find node by device");
		return;
	}
	hmdfs_info("Found peer: device_id = %llu", node->device_id);
	hmdfs_disconnect_node_marked(node);
	peer_put(node);
}

typedef void (*ctrl_cmd_handler)(const char *buf, size_t len,
				 struct hmdfs_sb_info *sbi);

static const ctrl_cmd_handler cmd_handler[CMD_CNT] = {
	[CMD_UPDATE_SOCKET] = ctrl_cmd_update_socket_handler,
	[CMD_UPDATE_DEVSL] = ctrl_cmd_update_devsl_handler,
	[CMD_OFF_LINE] = ctrl_cmd_off_line_handler,
};

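/*
 * Read side of the "cmd" attribute: pop one queued notify_param from the
 * notify fifo and hand it to userspace as a raw binary record.  If the
 * fifo is empty, a record with notify set to NOTIFY_NONE is returned, so
 * a reader always gets exactly sizeof(struct notify_param) bytes.
 */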
static ssize_t sbi_cmd_show(struct kobject *kobj, struct sbi_attribute *attr,
			    char *buf)
{
	struct notify_param param;
	int out_len;
	struct hmdfs_sb_info *sbi = to_sbi(kobj);

	memset(&param, 0, sizeof(param));
	spin_lock(&sbi->notify_fifo_lock);
	out_len = kfifo_out(&sbi->notify_fifo, &param, sizeof(param));
	spin_unlock(&sbi->notify_fifo_lock);
	if (out_len != sizeof(param))
		param.notify = NOTIFY_NONE;
	memcpy(buf, &param, sizeof(param));
	return sizeof(param);
}

static const char *cmd2str(int cmd)
{
	switch (cmd) {
	case CMD_UPDATE_SOCKET:
		return "CMD_UPDATE_SOCKET";
	case CMD_UPDATE_DEVSL:
		return "CMD_UPDATE_DEVSL";
	case CMD_OFF_LINE:
		return "CMD_OFF_LINE";
	default:
		return "illegal cmd";
	}
}

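/*
 * Write side of the "cmd" attribute: the buffer starts with a binary
 * command word (validated against CMD_CNT); the whole buffer is then
 * passed to the matching handler under cmd_handler_mutex.
 */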
static ssize_t sbi_cmd_store(struct kobject *kobj, struct sbi_attribute *attr,
			     const char *buf, size_t len)
{
	int cmd;
	struct hmdfs_sb_info *sbi = to_sbi(kobj);

	if (!sbi) {
		hmdfs_info("Fatal! Empty sbi. Mount fs first");
		return len;
	}
	if (len < sizeof(int)) {
		hmdfs_err("Illegal cmd: cmd len = %zu", len);
		return len;
	}
	cmd = *(int *)buf;
	if (cmd < 0 || cmd >= CMD_CNT) {
		hmdfs_err("Illegal cmd: cmd = %d", cmd);
		return len;
	}
	mutex_lock(&sbi->cmd_handler_mutex);
	hmdfs_info("Received cmd: %s", cmd2str(cmd));
	if (cmd_handler[cmd])
		cmd_handler[cmd](buf, len, sbi);
	mutex_unlock(&sbi->cmd_handler_mutex);
	return len;
}

static struct sbi_attribute sbi_cmd_attr =
	__ATTR(cmd, 0664, sbi_cmd_show, sbi_cmd_store);

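/*
 * "status" dump: one line per peer (cid and status), followed by one
 * line per TCP connection of that peer with its socket fd, connection
 * status, socket state and file refcount.
 */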
static ssize_t sbi_status_show(struct kobject *kobj, struct sbi_attribute *attr,
			       char *buf)
{
	ssize_t size = 0;
	struct hmdfs_sb_info *sbi = NULL;
	struct hmdfs_peer *peer = NULL;
	struct connection *conn_impl = NULL;
	struct tcp_handle *tcp = NULL;

	sbi = to_sbi(kobj);
	size += snprintf(buf + size, PAGE_SIZE - size, "peers status\n");

	mutex_lock(&sbi->connections.node_lock);
	list_for_each_entry(peer, &sbi->connections.node_list, list) {
		size += snprintf(buf + size, PAGE_SIZE - size, "%s %d\n",
				 peer->cid, peer->status);
		// connection information
		size += snprintf(
			buf + size, PAGE_SIZE - size,
			"\t socket_fd  connection_status  tcp_status  ... refcnt\n");
		mutex_lock(&peer->conn_impl_list_lock);
		list_for_each_entry(conn_impl, &peer->conn_impl_list, list) {
			tcp = conn_impl->connect_handle;
			size += snprintf(buf + size, PAGE_SIZE - size,
					"\t %d  \t%d  \t%d  \t%p  \t%ld\n",
					tcp->fd, conn_impl->status,
					tcp->sock->state, tcp->sock,
					file_count(tcp->sock->file));
		}
		mutex_unlock(&peer->conn_impl_list_lock);
	}
	mutex_unlock(&sbi->connections.node_lock);
	return size;
}

static ssize_t sbi_status_store(struct kobject *kobj,
				struct sbi_attribute *attr, const char *buf,
				size_t len)
{
	return len;
}

static struct sbi_attribute sbi_status_attr =
	__ATTR(status, 0664, sbi_status_show, sbi_status_store);

static ssize_t sbi_stat_show(struct kobject *kobj, struct sbi_attribute *attr,
			     char *buf)
{
	ssize_t size = 0;
	struct hmdfs_sb_info *sbi = NULL;
	struct hmdfs_peer *peer = NULL;
	struct connection *conn_impl = NULL;
	struct tcp_handle *tcp = NULL;

	sbi = to_sbi(kobj);
	mutex_lock(&sbi->connections.node_lock);
	list_for_each_entry(peer, &sbi->connections.node_list, list) {
		// connection information
		mutex_lock(&peer->conn_impl_list_lock);
		list_for_each_entry(conn_impl, &peer->conn_impl_list, list) {
			tcp = conn_impl->connect_handle;
			size += snprintf(buf + size, PAGE_SIZE - size,
					 "socket_fd: %d\n", tcp->fd);
			size += snprintf(buf + size, PAGE_SIZE - size,
					"\tsend_msg %d \tsend_bytes %llu\n",
					conn_impl->stat.send_message_count,
					conn_impl->stat.send_bytes);
			size += snprintf(buf + size, PAGE_SIZE - size,
					"\trecv_msg %d \trecv_bytes %llu\n",
					conn_impl->stat.recv_message_count,
					conn_impl->stat.recv_bytes);
		}
		mutex_unlock(&peer->conn_impl_list_lock);
	}
	mutex_unlock(&sbi->connections.node_lock);
	return size;
}

static ssize_t sbi_stat_store(struct kobject *kobj, struct sbi_attribute *attr,
			      const char *buf, size_t len)
{
	struct hmdfs_sb_info *sbi = NULL;
	struct hmdfs_peer *peer = NULL;
	struct connection *conn_impl = NULL;

	sbi = to_sbi(kobj);
	mutex_lock(&sbi->connections.node_lock);
	list_for_each_entry(peer, &sbi->connections.node_list, list) {
		// connection information
		mutex_lock(&peer->conn_impl_list_lock);
		list_for_each_entry(conn_impl, &peer->conn_impl_list, list) {
			conn_impl->stat.send_message_count = 0;
			conn_impl->stat.send_bytes = 0;
			conn_impl->stat.recv_message_count = 0;
			conn_impl->stat.recv_bytes = 0;
		}
		mutex_unlock(&peer->conn_impl_list_lock);
	}
	mutex_unlock(&sbi->connections.node_lock);
	return len;
}

static struct sbi_attribute sbi_statistic_attr =
	__ATTR(statistic, 0664, sbi_stat_show, sbi_stat_store);

static ssize_t sbi_dcache_precision_show(struct kobject *kobj,
					 struct sbi_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", to_sbi(kobj)->dcache_precision);
}

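/*
 * Upper bound accepted through the dcache_precision attribute
 * (presumably milliseconds, in which case 3600000 is one hour).
 */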
#define PRECISION_MAX 3600000

static ssize_t sbi_dcache_precision_store(struct kobject *kobj,
					  struct sbi_attribute *attr,
					  const char *buf, size_t len)
{
	int ret;
	unsigned int precision;
	struct hmdfs_sb_info *sbi = to_sbi(kobj);

	ret = kstrtouint(skip_spaces(buf), 0, &precision);
	if (!ret) {
		if (precision <= PRECISION_MAX)
			sbi->dcache_precision = precision;
		else
			ret = -EINVAL;
	}

	return ret ? ret : len;
}

static struct sbi_attribute sbi_dcache_precision_attr =
	__ATTR(dcache_precision, 0664, sbi_dcache_precision_show,
	       sbi_dcache_precision_store);

static ssize_t sbi_dcache_threshold_show(struct kobject *kobj,
					 struct sbi_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%lu\n",
			to_sbi(kobj)->dcache_threshold);
}

static ssize_t sbi_dcache_threshold_store(struct kobject *kobj,
					  struct sbi_attribute *attr,
					  const char *buf, size_t len)
{
	int ret;
	unsigned long threshold;
	struct hmdfs_sb_info *sbi = to_sbi(kobj);

	ret = kstrtoul(skip_spaces(buf), 0, &threshold);
	if (!ret)
		sbi->dcache_threshold = threshold;

	return ret ? ret : len;
}

static struct sbi_attribute sbi_dcache_threshold_attr =
	__ATTR(dcache_threshold, 0664, sbi_dcache_threshold_show,
	       sbi_dcache_threshold_store);

static ssize_t server_statistic_show(struct kobject *kobj,
				     struct sbi_attribute *attr, char *buf)
{
	int i, ret;
	const size_t size = PAGE_SIZE - 1;
	ssize_t pos = 0;
	struct server_statistic *stat = to_sbi(kobj)->s_server_statis;

	for (i = 0; i < F_SIZE; i++) {

		ret = snprintf(buf + pos, size - pos,
			       "%llu %u %llu %llu\n",
			       stat[i].cnt,
			       jiffies_to_msecs(stat[i].max),
			       stat[i].snd_cnt, stat[i].snd_fail_cnt);
		if (ret > size - pos)
			break;
		pos += ret;
	}

	/* If the loop broke early, terminate the output with a newline */
	if (i < F_SIZE) {
		ret = snprintf(buf + pos, size + 1 - pos, "\n");
		pos += ret;
	}
	return pos;
}

static struct sbi_attribute sbi_local_op_attr = __ATTR_RO(server_statistic);

static ssize_t client_statistic_show(struct kobject *kobj,
				     struct sbi_attribute *attr, char *buf)
{
	int i, ret;
	const size_t size = PAGE_SIZE - 1;
	ssize_t pos = 0;
	struct client_statistic *stat = to_sbi(kobj)->s_client_statis;

	for (i = 0; i < F_SIZE; i++) {

		ret = snprintf(buf + pos, size - pos,
			       "%llu %llu %llu %llu %llu %u\n",
			       stat[i].snd_cnt,
			       stat[i].snd_fail_cnt,
			       stat[i].resp_cnt,
			       stat[i].timeout_cnt,
			       stat[i].delay_resp_cnt,
			       jiffies_to_msecs(stat[i].max));
		if (ret > size - pos)
			break;
		pos += ret;
	}

	/* If the loop broke early, terminate the output with a newline */
	if (i < F_SIZE) {
		ret = snprintf(buf + pos, size + 1 - pos, "\n");
		pos += ret;
	}

	return pos;
}

static struct sbi_attribute sbi_delay_resp_attr = __ATTR_RO(client_statistic);

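/* Convert a page count to KiB: PAGE_SHIFT - 10 == log2(PAGE_SIZE / 1024) */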
static inline unsigned long pages_to_kbytes(unsigned long page)
{
	return page << (PAGE_SHIFT - 10);
}

static ssize_t dirty_writeback_stats_show(struct kobject *kobj,
					  struct sbi_attribute *attr,
					  char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);
	struct hmdfs_writeback *hwb = sbi->h_wb;
	unsigned long avg;
	unsigned long max;
	unsigned long min;

	spin_lock(&hwb->write_bandwidth_lock);
	avg = hwb->avg_write_bandwidth;
	max = hwb->max_write_bandwidth;
	min = hwb->min_write_bandwidth;
	spin_unlock(&hwb->write_bandwidth_lock);

	if (min == ULONG_MAX)
		min = 0;

	return snprintf(buf, PAGE_SIZE,
			"%10lu\n"
			"%10lu\n"
			"%10lu\n",
			pages_to_kbytes(avg),
			pages_to_kbytes(max),
			pages_to_kbytes(min));
}

static struct sbi_attribute sbi_dirty_writeback_stats_attr =
	__ATTR_RO(dirty_writeback_stats);

static ssize_t sbi_wb_timeout_ms_show(struct kobject *kobj,
				      struct sbi_attribute *attr,
				      char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%u\n", sbi->wb_timeout_ms);
}

static ssize_t sbi_wb_timeout_ms_store(struct kobject *kobj,
				       struct sbi_attribute *attr,
				       const char *buf, size_t len)
{
	struct hmdfs_sb_info *sbi = to_sbi(kobj);
	unsigned int val;
	int err;

	err = kstrtouint(buf, 10, &val);
	if (err)
		return err;

	if (!val || val > HMDFS_MAX_WB_TIMEOUT_MS)
		return -EINVAL;

	sbi->wb_timeout_ms = val;

	return len;
}

static struct sbi_attribute sbi_wb_timeout_ms_attr =
	__ATTR(wb_timeout_ms, 0664, sbi_wb_timeout_ms_show,
	       sbi_wb_timeout_ms_store);

static ssize_t sbi_dirty_writeback_centisecs_show(struct kobject *kobj,
						  struct sbi_attribute *attr,
						  char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%u\n",
			sbi->h_wb->dirty_writeback_interval);
}

static ssize_t sbi_dirty_writeback_centisecs_store(struct kobject *kobj,
						   struct sbi_attribute *attr,
						   const char *buf, size_t len)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);
	int err;

	err = kstrtouint(buf, 10, &sbi->h_wb->dirty_writeback_interval);
	if (err)
		return err;
	return len;
}

static struct sbi_attribute sbi_dirty_writeback_centisecs_attr =
	__ATTR(dirty_writeback_centisecs, 0664,
	       sbi_dirty_writeback_centisecs_show,
	       sbi_dirty_writeback_centisecs_store);

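/*
 * The dirty byte limits below are kept consistent with one another:
 * every store clamps the neighbouring limits so that
 *
 *	dirty_file_bg_bytes <= dirty_file_bytes <= dirty_fs_bytes
 *	dirty_file_bg_bytes <= dirty_fs_bg_bytes <= dirty_fs_bytes
 *
 * still holds, then recomputes the writeback thresholds and ratelimit
 * via hmdfs_calculate_dirty_thresh() and hmdfs_update_ratelimit().
 */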
static ssize_t sbi_dirty_file_background_bytes_show(struct kobject *kobj,
						    struct sbi_attribute *attr,
						    char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%lu\n",
			sbi->h_wb->dirty_file_bg_bytes);
}

static ssize_t sbi_dirty_file_background_bytes_store(struct kobject *kobj,
						     struct sbi_attribute *attr,
						     const char *buf,
						     size_t len)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);
	unsigned long file_background_bytes = 0;
	int err;

	err = kstrtoul(buf, 10, &file_background_bytes);
	if (err)
		return err;
	if (file_background_bytes == 0)
		return -EINVAL;

	sbi->h_wb->dirty_fs_bytes =
		max(sbi->h_wb->dirty_fs_bytes, file_background_bytes);
	sbi->h_wb->dirty_fs_bg_bytes =
		max(sbi->h_wb->dirty_fs_bg_bytes, file_background_bytes);
	sbi->h_wb->dirty_file_bytes =
		max(sbi->h_wb->dirty_file_bytes, file_background_bytes);

	sbi->h_wb->dirty_file_bg_bytes = file_background_bytes;
	hmdfs_calculate_dirty_thresh(sbi->h_wb);
	hmdfs_update_ratelimit(sbi->h_wb);
	return len;
}

static ssize_t sbi_dirty_fs_background_bytes_show(struct kobject *kobj,
						  struct sbi_attribute *attr,
						  char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%lu\n", sbi->h_wb->dirty_fs_bg_bytes);
}

static ssize_t sbi_dirty_fs_background_bytes_store(struct kobject *kobj,
						   struct sbi_attribute *attr,
						   const char *buf, size_t len)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);
	unsigned long fs_background_bytes = 0;
	int err;

	err = kstrtoul(buf, 10, &fs_background_bytes);
	if (err)
		return err;
	if (fs_background_bytes == 0)
		return -EINVAL;

	sbi->h_wb->dirty_file_bg_bytes =
		min(sbi->h_wb->dirty_file_bg_bytes, fs_background_bytes);
	sbi->h_wb->dirty_fs_bytes =
		max(sbi->h_wb->dirty_fs_bytes, fs_background_bytes);

	sbi->h_wb->dirty_fs_bg_bytes = fs_background_bytes;
	hmdfs_calculate_dirty_thresh(sbi->h_wb);
	hmdfs_update_ratelimit(sbi->h_wb);
	return len;
}

static struct sbi_attribute sbi_dirty_file_background_bytes_attr =
	__ATTR(dirty_file_background_bytes, 0644,
	       sbi_dirty_file_background_bytes_show,
	       sbi_dirty_file_background_bytes_store);
static struct sbi_attribute sbi_dirty_fs_background_bytes_attr =
	__ATTR(dirty_fs_background_bytes, 0644,
	       sbi_dirty_fs_background_bytes_show,
	       sbi_dirty_fs_background_bytes_store);

static ssize_t sbi_dirty_file_bytes_show(struct kobject *kobj,
					 struct sbi_attribute *attr, char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%lu\n", sbi->h_wb->dirty_file_bytes);
}

static ssize_t sbi_dirty_file_bytes_store(struct kobject *kobj,
					  struct sbi_attribute *attr,
					  const char *buf, size_t len)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);
	unsigned long file_bytes = 0;
	int err;

	err = kstrtoul(buf, 10, &file_bytes);
	if (err)
		return err;
	if (file_bytes == 0)
		return -EINVAL;

	sbi->h_wb->dirty_file_bg_bytes =
		min(sbi->h_wb->dirty_file_bg_bytes, file_bytes);
	sbi->h_wb->dirty_fs_bytes = max(sbi->h_wb->dirty_fs_bytes, file_bytes);

	sbi->h_wb->dirty_file_bytes = file_bytes;
	hmdfs_calculate_dirty_thresh(sbi->h_wb);
	hmdfs_update_ratelimit(sbi->h_wb);
	return len;
}

static ssize_t sbi_dirty_fs_bytes_show(struct kobject *kobj,
				       struct sbi_attribute *attr, char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%lu\n", sbi->h_wb->dirty_fs_bytes);
}

static ssize_t sbi_dirty_fs_bytes_store(struct kobject *kobj,
					struct sbi_attribute *attr,
					const char *buf, size_t len)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);
	unsigned long fs_bytes = 0;
	int err;

	err = kstrtoul(buf, 10, &fs_bytes);
	if (err)
		return err;
	if (fs_bytes == 0)
		return -EINVAL;

	sbi->h_wb->dirty_file_bg_bytes =
		min(sbi->h_wb->dirty_file_bg_bytes, fs_bytes);
	sbi->h_wb->dirty_file_bytes =
		min(sbi->h_wb->dirty_file_bytes, fs_bytes);
	sbi->h_wb->dirty_fs_bg_bytes =
		min(sbi->h_wb->dirty_fs_bg_bytes, fs_bytes);

	sbi->h_wb->dirty_fs_bytes = fs_bytes;
	hmdfs_calculate_dirty_thresh(sbi->h_wb);
	hmdfs_update_ratelimit(sbi->h_wb);
	return len;
}

static struct sbi_attribute sbi_dirty_file_bytes_attr =
	__ATTR(dirty_file_bytes, 0644, sbi_dirty_file_bytes_show,
	       sbi_dirty_file_bytes_store);
static struct sbi_attribute sbi_dirty_fs_bytes_attr =
	__ATTR(dirty_fs_bytes, 0644, sbi_dirty_fs_bytes_show,
	       sbi_dirty_fs_bytes_store);

static ssize_t sbi_dirty_writeback_timelimit_show(struct kobject *kobj,
						  struct sbi_attribute *attr,
						  char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%u\n",
			sbi->h_wb->writeback_timelimit / HZ);
}

static ssize_t sbi_dirty_writeback_timelimit_store(struct kobject *kobj,
						   struct sbi_attribute *attr,
						   const char *buf,
						   size_t len)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);
	unsigned int time_limit = 0;
	int err;

	err = kstrtouint(buf, 10, &time_limit);
	if (err)
		return err;
	if (time_limit == 0 || time_limit > (HMDFS_MAX_WB_TIMELIMIT / HZ))
		return -EINVAL;

	sbi->h_wb->writeback_timelimit = time_limit * HZ;
	return len;
}

static struct sbi_attribute sbi_dirty_writeback_timelimit_attr =
__ATTR(dirty_writeback_timelimit, 0644, sbi_dirty_writeback_timelimit_show,
	sbi_dirty_writeback_timelimit_store);

static ssize_t sbi_dirty_thresh_lowerlimit_show(struct kobject *kobj,
						struct sbi_attribute *attr,
						char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%lu\n",
			sbi->h_wb->bw_thresh_lowerlimit << PAGE_SHIFT);
}

static ssize_t sbi_dirty_thresh_lowerlimit_store(struct kobject *kobj,
						 struct sbi_attribute *attr,
						 const char *buf,
						 size_t len)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);
	unsigned long bw_thresh_lowerbytes = 0;
	unsigned long bw_thresh_lowerlimit;
	int err;

	err = kstrtoul(buf, 10, &bw_thresh_lowerbytes);
	if (err)
		return err;

	bw_thresh_lowerlimit = DIV_ROUND_UP(bw_thresh_lowerbytes, PAGE_SIZE);
	if (bw_thresh_lowerlimit < HMDFS_BW_THRESH_MIN_LIMIT ||
	    bw_thresh_lowerlimit > HMDFS_BW_THRESH_MAX_LIMIT)
		return -EINVAL;

	sbi->h_wb->bw_thresh_lowerlimit = bw_thresh_lowerlimit;
	return len;
}

static struct sbi_attribute sbi_dirty_thresh_lowerlimit_attr =
__ATTR(dirty_thresh_lowerlimit, 0644, sbi_dirty_thresh_lowerlimit_show,
	sbi_dirty_thresh_lowerlimit_store);

static ssize_t sbi_dirty_writeback_autothresh_show(struct kobject *kobj,
						   struct sbi_attribute *attr,
						   char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%d\n",
			sbi->h_wb->dirty_auto_threshold);
}

static ssize_t sbi_dirty_writeback_autothresh_store(struct kobject *kobj,
						    struct sbi_attribute *attr,
						    const char *buf,
						    size_t len)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);
	bool dirty_auto_threshold = false;
	int err;

	err = kstrtobool(buf, &dirty_auto_threshold);
	if (err)
		return err;

	sbi->h_wb->dirty_auto_threshold = dirty_auto_threshold;
	return len;
}

static struct sbi_attribute sbi_dirty_writeback_autothresh_attr =
__ATTR(dirty_writeback_autothresh, 0644, sbi_dirty_writeback_autothresh_show,
	sbi_dirty_writeback_autothresh_store);

static ssize_t sbi_dirty_writeback_control_show(struct kobject *kobj,
						struct sbi_attribute *attr,
						char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%d\n",
			sbi->h_wb->dirty_writeback_control);
}

static ssize_t sbi_dirty_writeback_control_store(struct kobject *kobj,
						 struct sbi_attribute *attr,
						 const char *buf, size_t len)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);
	unsigned int dirty_writeback_control = 0;
	int err;

	err = kstrtouint(buf, 10, &dirty_writeback_control);
	if (err)
		return err;

	sbi->h_wb->dirty_writeback_control = (bool)dirty_writeback_control;
	return len;
}

static struct sbi_attribute sbi_dirty_writeback_control_attr =
	__ATTR(dirty_writeback_control, 0644, sbi_dirty_writeback_control_show,
	       sbi_dirty_writeback_control_store);

static ssize_t sbi_srv_dirty_thresh_show(struct kobject *kobj,
					 struct sbi_attribute *attr,
					 char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%d\n",
			sbi->h_swb->dirty_thresh_pg >> HMDFS_MB_TO_PAGE_SHIFT);
}

static ssize_t sbi_srv_dirty_thresh_store(struct kobject *kobj,
					  struct sbi_attribute *attr,
					  const char *buf,
					  size_t len)
{
	struct hmdfs_server_writeback *hswb = to_sbi(kobj)->h_swb;
	int dirty_thresh_mb;
	unsigned long long pages;
	int err;

	err = kstrtoint(buf, 10, &dirty_thresh_mb);
	if (err)
		return err;

	if (dirty_thresh_mb <= 0)
		return -EINVAL;

	pages = dirty_thresh_mb;
	pages <<= HMDFS_MB_TO_PAGE_SHIFT;
	if (pages > INT_MAX) {
		hmdfs_err("Illegal dirty_thresh_mb %d, its page count exceeds INT_MAX",
			  dirty_thresh_mb);
		return -EINVAL;
	}

	hswb->dirty_thresh_pg = (unsigned int)pages;
	return len;
}

static struct sbi_attribute sbi_srv_dirty_thresh_attr =
__ATTR(srv_dirty_thresh, 0644, sbi_srv_dirty_thresh_show,
	sbi_srv_dirty_thresh_store);

static ssize_t sbi_srv_dirty_wb_control_show(struct kobject *kobj,
					     struct sbi_attribute *attr,
					     char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%d\n",
			sbi->h_swb->dirty_writeback_control);
}

static ssize_t sbi_srv_dirty_wb_control_store(struct kobject *kobj,
					      struct sbi_attribute *attr,
					      const char *buf,
					      size_t len)
{
	struct hmdfs_server_writeback *hswb = to_sbi(kobj)->h_swb;
	bool dirty_writeback_control = true;
	int err;

	err = kstrtobool(buf, &dirty_writeback_control);
	if (err)
		return err;

	hswb->dirty_writeback_control = dirty_writeback_control;

	return len;
}

static struct sbi_attribute sbi_srv_dirty_wb_control_attr =
__ATTR(srv_dirty_writeback_control, 0644, sbi_srv_dirty_wb_control_show,
	sbi_srv_dirty_wb_control_store);

static ssize_t sbi_dcache_timeout_show(struct kobject *kobj,
				       struct sbi_attribute *attr, char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%u\n", sbi->dcache_timeout);
}

static ssize_t sbi_dcache_timeout_store(struct kobject *kobj,
					struct sbi_attribute *attr,
					const char *buf, size_t len)
{
	struct hmdfs_sb_info *sbi = to_sbi(kobj);
	unsigned int timeout;
	int err;

	err = kstrtouint(buf, 0, &timeout);
	if (err)
		return err;

	/* zero is invalid, and it does not mean "no cache" */
	if (timeout == 0 || timeout > MAX_DCACHE_TIMEOUT)
		return -EINVAL;

	sbi->dcache_timeout = timeout;

	return len;
}

static struct sbi_attribute sbi_dcache_timeout_attr =
	__ATTR(dcache_timeout, 0644, sbi_dcache_timeout_show,
	       sbi_dcache_timeout_store);

static ssize_t sbi_write_cache_timeout_sec_show(struct kobject *kobj,
			struct sbi_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n",
			to_sbi(kobj)->write_cache_timeout);
}

static ssize_t sbi_write_cache_timeout_sec_store(struct kobject *kobj,
		struct sbi_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned int timeout;
	struct hmdfs_sb_info *sbi = to_sbi(kobj);

	ret = kstrtouint(buf, 0, &timeout);
	if (ret)
		return ret;

	/* Setting write_cache_timeout to 0 disables this functionality */
	sbi->write_cache_timeout = timeout;

	return len;
}

static struct sbi_attribute sbi_write_cache_timeout_sec_attr =
	__ATTR(write_cache_timeout_sec, 0664, sbi_write_cache_timeout_sec_show,
	       sbi_write_cache_timeout_sec_store);

static ssize_t sbi_node_evt_cb_delay_show(struct kobject *kobj,
					  struct sbi_attribute *attr,
					  char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%u\n", sbi->async_cb_delay);
}

static ssize_t sbi_node_evt_cb_delay_store(struct kobject *kobj,
					   struct sbi_attribute *attr,
					   const char *buf,
					   size_t len)
{
	struct hmdfs_sb_info *sbi = to_sbi(kobj);
	unsigned int delay = 0;
	int err;

	err = kstrtouint(buf, 10, &delay);
	if (err)
		return err;

	sbi->async_cb_delay = delay;

	return len;
}

static struct sbi_attribute sbi_node_evt_cb_delay_attr =
__ATTR(node_event_delay, 0644, sbi_node_evt_cb_delay_show,
	sbi_node_evt_cb_delay_store);

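/*
 * Count the entries in an IDR, rescheduling every
 * HMDFS_IDR_RESCHED_COUNT entries so that large trees do not hog the
 * CPU.
 */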
static int calc_idr_number(struct idr *idr)
{
	void *entry = NULL;
	int id;
	int number = 0;

	idr_for_each_entry(idr, entry, id) {
		number++;
		if (number % HMDFS_IDR_RESCHED_COUNT == 0)
			cond_resched();
	}

	return number;
}

static ssize_t sbi_show_idr_stats(struct kobject *kobj,
				  struct sbi_attribute *attr,
				  char *buf, bool showmsg)
{
	ssize_t size = 0;
	int count;
	struct hmdfs_sb_info *sbi = NULL;
	struct hmdfs_peer *peer = NULL;
	struct idr *idr = NULL;

	sbi = to_sbi(kobj);

	mutex_lock(&sbi->connections.node_lock);
	list_for_each_entry(peer, &sbi->connections.node_list, list) {
		idr = showmsg ? &peer->msg_idr : &peer->file_id_idr;
		count = calc_idr_number(idr);
		size += snprintf(buf + size, PAGE_SIZE - size,
				"device-id\tcount\tnext-id\n\t%llu\t\t%d\t%u\n",
				 peer->device_id, count, idr_get_cursor(idr));
		if (size >= PAGE_SIZE) {
			size = PAGE_SIZE;
			break;
		}
	}
	mutex_unlock(&sbi->connections.node_lock);

	return size;
}

static ssize_t pending_message_show(struct kobject *kobj,
				    struct sbi_attribute *attr,
				    char *buf)
{
	return sbi_show_idr_stats(kobj, attr, buf, true);
}

static struct sbi_attribute sbi_pending_message_attr =
	__ATTR_RO(pending_message);

static ssize_t peer_opened_fd_show(struct kobject *kobj,
				   struct sbi_attribute *attr, char *buf)
{
	return sbi_show_idr_stats(kobj, attr, buf, false);
}

static struct sbi_attribute sbi_peer_opened_fd_attr = __ATTR_RO(peer_opened_fd);

static ssize_t sbi_srv_req_max_active_attr_show(struct kobject *kobj,
						struct sbi_attribute *attr,
						char *buf)
{
	const struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return snprintf(buf, PAGE_SIZE, "%u\n", sbi->async_req_max_active);
}

static ssize_t sbi_srv_req_max_active_attr_store(struct kobject *kobj,
		struct sbi_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned int max_active;
	struct hmdfs_sb_info *sbi = to_sbi(kobj);

	ret = kstrtouint(buf, 0, &max_active);
	if (ret)
		return ret;

	sbi->async_req_max_active = max_active;

	return len;
}

static struct sbi_attribute sbi_srv_req_max_active_attr =
__ATTR(srv_req_handle_max_active, 0644, sbi_srv_req_max_active_attr_show,
	sbi_srv_req_max_active_attr_store);

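/*
 * List the cache_file_node entries (peer cid plus relative path) on
 * either the client or the server cache list of this superblock.
 */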
static ssize_t cache_file_show(struct hmdfs_sb_info *sbi,
			       struct list_head *head, char *buf)
{
	struct cache_file_node *cfn = NULL;
	ssize_t pos = 0;

	mutex_lock(&sbi->cache_list_lock);
	list_for_each_entry(cfn, head, list) {
		pos += snprintf(buf + pos, PAGE_SIZE - pos,
			"dev_id: %s relative_path: %s\n",
			cfn->cid, cfn->relative_path);
		if (pos >= PAGE_SIZE) {
			pos = PAGE_SIZE;
			break;
		}
	}
	mutex_unlock(&sbi->cache_list_lock);

	return pos;
}

static ssize_t client_cache_file_show(struct kobject *kobj,
			struct sbi_attribute *attr, char *buf)
{
	return cache_file_show(to_sbi(kobj), &to_sbi(kobj)->client_cache, buf);
}

static ssize_t server_cache_file_show(struct kobject *kobj,
			struct sbi_attribute *attr, char *buf)
{
	return cache_file_show(to_sbi(kobj), &to_sbi(kobj)->server_cache, buf);
}

static struct sbi_attribute sbi_server_cache_file_attr =
	__ATTR_RO(server_cache_file);
static struct sbi_attribute sbi_client_cache_file_attr =
	__ATTR_RO(client_cache_file);

static ssize_t sb_seq_show(struct kobject *kobj, struct sbi_attribute *attr,
			    char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", to_sbi(kobj)->seq);
}

static struct sbi_attribute sbi_seq_attr = __ATTR_RO(sb_seq);

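/*
 * Sum the stash/restore/rebuild counters over all peers.  node_lock is
 * dropped while one peer's counters are read; the peer itself is kept
 * alive by the reference taken with peer_get().
 */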
static ssize_t peers_sum_attr_show(struct kobject *kobj,
				   struct sbi_attribute *attr, char *buf)
{
	struct hmdfs_sb_info *sbi = to_sbi(kobj);
	struct hmdfs_peer *node = NULL;
	unsigned int stash_ok = 0, stash_fail = 0, restore_ok = 0,
		     restore_fail = 0, rebuild_ok = 0, rebuild_fail = 0,
		     rebuild_invalid = 0, rebuild_time = 0;
	unsigned long long stash_ok_pages = 0, stash_fail_pages = 0,
			   restore_ok_pages = 0, restore_fail_pages = 0;

	mutex_lock(&sbi->connections.node_lock);
	list_for_each_entry(node, &sbi->connections.node_list, list) {
		peer_get(node);
		mutex_unlock(&sbi->connections.node_lock);
		stash_ok += node->stats.stash.total_ok;
		stash_fail += node->stats.stash.total_fail;
		stash_ok_pages += node->stats.stash.ok_pages;
		stash_fail_pages += node->stats.stash.fail_pages;
		restore_ok += node->stats.restore.total_ok;
		restore_fail += node->stats.restore.total_fail;
		restore_ok_pages += node->stats.restore.ok_pages;
		restore_fail_pages += node->stats.restore.fail_pages;
		rebuild_ok += node->stats.rebuild.total_ok;
		rebuild_fail += node->stats.rebuild.total_fail;
		rebuild_invalid += node->stats.rebuild.total_invalid;
		rebuild_time += node->stats.rebuild.time;
		peer_put(node);
		mutex_lock(&sbi->connections.node_lock);
	}
	mutex_unlock(&sbi->connections.node_lock);

	return snprintf(buf, PAGE_SIZE,
			"%u %u %llu %llu\n"
			"%u %u %llu %llu\n"
			"%u %u %u %u\n",
			stash_ok, stash_fail, stash_ok_pages, stash_fail_pages,
			restore_ok, restore_fail, restore_ok_pages,
			restore_fail_pages, rebuild_ok, rebuild_fail,
			rebuild_invalid, rebuild_time);
}

static struct sbi_attribute sbi_peers_attr = __ATTR_RO(peers_sum_attr);

const char * const flag_name[] = {
	"READPAGES",
	"READPAGES_OPEN",
	"ATOMIC_OPEN",
};

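/*
 * Render the set bits of a feature mask as '|'-separated names, falling
 * back to the raw bit number for bits without an entry in flag_name[].
 */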
static ssize_t fill_features(char *buf, unsigned long long flag)
{
	int i;
	ssize_t pos = 0;
	bool sep = false;
	int flag_name_count = ARRAY_SIZE(flag_name);

	for (i = 0; i < sizeof(flag) * BITS_PER_BYTE; ++i) {
		if (!(flag & BIT(i)))
			continue;

		if (sep)
			pos += snprintf(buf + pos, PAGE_SIZE - pos, "|");
		sep = true;

		if (pos >= PAGE_SIZE) {
			pos = PAGE_SIZE;
			break;
		}

		if (i < flag_name_count && flag_name[i])
			pos += snprintf(buf + pos, PAGE_SIZE - pos, "%s",
					flag_name[i]);
		else
			pos += snprintf(buf + pos, PAGE_SIZE - pos, "%d", i);

		if (pos >= PAGE_SIZE) {
			pos = PAGE_SIZE;
			break;
		}
	}
	pos += snprintf(buf + pos, PAGE_SIZE - pos, "\n");
	if (pos >= PAGE_SIZE)
		pos = PAGE_SIZE;

	return pos;
}

static ssize_t sbi_features_show(struct kobject *kobj,
				 struct sbi_attribute *attr, char *buf)
{
	struct hmdfs_sb_info *sbi = to_sbi(kobj);

	return fill_features(buf, sbi->s_features);
}

static struct sbi_attribute sbi_features_attr = __ATTR(features, 0444,
	sbi_features_show, NULL);

static struct attribute *sbi_attrs[] = {
	&sbi_cmd_attr.attr,
	&sbi_status_attr.attr,
	&sbi_statistic_attr.attr,
	&sbi_dcache_precision_attr.attr,
	&sbi_dcache_threshold_attr.attr,
	&sbi_dcache_timeout_attr.attr,
	&sbi_write_cache_timeout_sec_attr.attr,
	&sbi_local_op_attr.attr,
	&sbi_delay_resp_attr.attr,
	&sbi_wb_timeout_ms_attr.attr,
	&sbi_dirty_writeback_centisecs_attr.attr,
	&sbi_dirty_file_background_bytes_attr.attr,
	&sbi_dirty_fs_background_bytes_attr.attr,
	&sbi_dirty_file_bytes_attr.attr,
	&sbi_dirty_fs_bytes_attr.attr,
	&sbi_dirty_writeback_autothresh_attr.attr,
	&sbi_dirty_writeback_timelimit_attr.attr,
	&sbi_dirty_thresh_lowerlimit_attr.attr,
	&sbi_dirty_writeback_control_attr.attr,
	&sbi_dirty_writeback_stats_attr.attr,
	&sbi_srv_dirty_thresh_attr.attr,
	&sbi_srv_dirty_wb_control_attr.attr,
	&sbi_node_evt_cb_delay_attr.attr,
	&sbi_srv_req_max_active_attr.attr,
	&sbi_pending_message_attr.attr,
	&sbi_peer_opened_fd_attr.attr,
	&sbi_server_cache_file_attr.attr,
	&sbi_client_cache_file_attr.attr,
	&sbi_seq_attr.attr,
	&sbi_peers_attr.attr,
	&sbi_features_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(sbi);

static ssize_t sbi_attr_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct sbi_attribute *sbi_attr = to_sbi_attr(attr);

	if (!sbi_attr->show)
		return -EIO;
	return sbi_attr->show(kobj, sbi_attr, buf);
}

static ssize_t sbi_attr_store(struct kobject *kobj, struct attribute *attr,
			      const char *buf, size_t len)
{
	struct sbi_attribute *sbi_attr = to_sbi_attr(attr);

	if (!sbi_attr->store)
		return -EIO;
	return sbi_attr->store(kobj, sbi_attr, buf, len);
}

static const struct sysfs_ops sbi_sysfs_ops = {
	.show = sbi_attr_show,
	.store = sbi_attr_store,
};

static void sbi_release(struct kobject *kobj)
{
	struct hmdfs_sb_info *sbi = to_sbi(kobj);

	complete(&sbi->s_kobj_unregister);
}

static struct kobj_type sbi_ktype = {
	.sysfs_ops = &sbi_sysfs_ops,
	.default_groups = sbi_groups,
	.release = sbi_release,
};

static inline struct sbi_cmd_attribute *to_sbi_cmd_attr(struct attribute *x)
{
	return container_of(x, struct sbi_cmd_attribute, attr);
}

static inline struct hmdfs_sb_info *cmd_kobj_to_sbi(struct kobject *x)
{
	return container_of(x, struct hmdfs_sb_info, s_cmd_timeout_kobj);
}

static ssize_t cmd_timeout_show(struct kobject *kobj, struct attribute *attr,
				char *buf)
{
	int cmd = to_sbi_cmd_attr(attr)->command;
	struct hmdfs_sb_info *sbi = cmd_kobj_to_sbi(kobj);

	if (cmd < 0 || cmd >= F_SIZE)
		return 0;

	return snprintf(buf, PAGE_SIZE, "%u\n", get_cmd_timeout(sbi, cmd));
}

static ssize_t cmd_timeout_store(struct kobject *kobj, struct attribute *attr,
				 const char *buf, size_t len)
{
	unsigned int value;
	int cmd = to_sbi_cmd_attr(attr)->command;
	int ret = kstrtouint(skip_spaces(buf), 0, &value);
	struct hmdfs_sb_info *sbi = cmd_kobj_to_sbi(kobj);

	if (cmd < 0 || cmd >= F_SIZE)
		return -EINVAL;

	if (!ret)
		set_cmd_timeout(sbi, cmd, value);

	return ret ? ret : len;
}

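/*
 * Define one attribute per remote command under the "cmd_timeout"
 * directory; reading or writing it calls get_cmd_timeout() or
 * set_cmd_timeout() for that command.
 */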
#define HMDFS_CMD_ATTR(_name, _cmd)                                            \
	static struct sbi_cmd_attribute hmdfs_attr_##_name = {                 \
		.attr = { .name = __stringify(_name), .mode = 0664 },          \
		.command = (_cmd),                                             \
	}

HMDFS_CMD_ATTR(open, F_OPEN);
HMDFS_CMD_ATTR(release, F_RELEASE);
HMDFS_CMD_ATTR(readpage, F_READPAGE);
HMDFS_CMD_ATTR(writepage, F_WRITEPAGE);
HMDFS_CMD_ATTR(iterate, F_ITERATE);
HMDFS_CMD_ATTR(rmdir, F_RMDIR);
HMDFS_CMD_ATTR(unlink, F_UNLINK);
HMDFS_CMD_ATTR(rename, F_RENAME);
HMDFS_CMD_ATTR(setattr, F_SETATTR);
HMDFS_CMD_ATTR(statfs, F_STATFS);
HMDFS_CMD_ATTR(drop_push, F_DROP_PUSH);
HMDFS_CMD_ATTR(getattr, F_GETATTR);
HMDFS_CMD_ATTR(fsync, F_FSYNC);
HMDFS_CMD_ATTR(syncfs, F_SYNCFS);
HMDFS_CMD_ATTR(getxattr, F_GETXATTR);
HMDFS_CMD_ATTR(setxattr, F_SETXATTR);
HMDFS_CMD_ATTR(listxattr, F_LISTXATTR);

#define ATTR_LIST(_name) (&hmdfs_attr_##_name.attr)

static struct attribute *sbi_timeout_attrs[] = {
	ATTR_LIST(open),     ATTR_LIST(release),
	ATTR_LIST(readpage), ATTR_LIST(writepage),
	ATTR_LIST(iterate),  ATTR_LIST(rmdir),
	ATTR_LIST(unlink),   ATTR_LIST(rename),
	ATTR_LIST(setattr),
	ATTR_LIST(statfs),   ATTR_LIST(drop_push),
	ATTR_LIST(getattr),  ATTR_LIST(fsync),
	ATTR_LIST(syncfs),   ATTR_LIST(getxattr),
	ATTR_LIST(setxattr), ATTR_LIST(listxattr),
	NULL
};
ATTRIBUTE_GROUPS(sbi_timeout);

static const struct sysfs_ops sbi_cmd_sysfs_ops = {
	.show = cmd_timeout_show,
	.store = cmd_timeout_store,
};

static void sbi_timeout_release(struct kobject *kobj)
{
	struct hmdfs_sb_info *sbi = container_of(kobj, struct hmdfs_sb_info,
						 s_cmd_timeout_kobj);

	complete(&sbi->s_timeout_kobj_unregister);
}

static struct kobj_type sbi_timeout_ktype = {
	.sysfs_ops = &sbi_cmd_sysfs_ops,
	.default_groups = sbi_timeout_groups,
	.release = sbi_timeout_release,
};

void hmdfs_release_sysfs(struct hmdfs_sb_info *sbi)
{
	kobject_put(&sbi->s_cmd_timeout_kobj);
	wait_for_completion(&sbi->s_timeout_kobj_unregister);
	kobject_put(&sbi->kobj);
	wait_for_completion(&sbi->s_kobj_unregister);
}

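/*
 * Register the per-superblock sysfs tree.  For a superblock registered
 * under <name>, the resulting layout is roughly:
 *
 *	/sys/fs/hmdfs/<name>/			main attribute set (sbi_attrs)
 *	/sys/fs/hmdfs/<name>/cmd_timeout/	per-command timeouts
 *	/sys/fs/hmdfs/<name>/peer_<device_id>/	per-peer stats (added when
 *						peers are registered)
 *
 * A name that already exists in the hmdfs kset is rejected with -EEXIST.
 */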
int hmdfs_register_sysfs(const char *name, struct hmdfs_sb_info *sbi)
{
	int ret;
	struct kobject *kobj = NULL;

	mutex_lock(&hmdfs_sysfs_mutex);
	kobj = kset_find_obj(hmdfs_kset, name);
	if (kobj) {
		hmdfs_err("mount failed, already exists");
		kobject_put(kobj);
		mutex_unlock(&hmdfs_sysfs_mutex);
		return -EEXIST;
	}

	sbi->kobj.kset = hmdfs_kset;
	init_completion(&sbi->s_kobj_unregister);
	ret = kobject_init_and_add(&sbi->kobj, &sbi_ktype,
				   &hmdfs_kset->kobj, "%s", name);
	if (!ret)
		sysfs_change_owner(&sbi->kobj, KUIDT_INIT(1000),
				   KGIDT_INIT(1000));
	mutex_unlock(&hmdfs_sysfs_mutex);

	if (ret) {
		kobject_put(&sbi->kobj);
		wait_for_completion(&sbi->s_kobj_unregister);
		return ret;
	}

	init_completion(&sbi->s_timeout_kobj_unregister);
	ret = kobject_init_and_add(&sbi->s_cmd_timeout_kobj, &sbi_timeout_ktype,
				   &sbi->kobj, "cmd_timeout");
	if (ret) {
		hmdfs_release_sysfs(sbi);
		return ret;
	}

	kobject_uevent(&sbi->kobj, KOBJ_ADD);
	return 0;
}

void hmdfs_unregister_sysfs(struct hmdfs_sb_info *sbi)
{
	kobject_del(&sbi->s_cmd_timeout_kobj);
	kobject_del(&sbi->kobj);
}

static inline int to_sysfs_fmt_evt(unsigned int evt)
{
	return evt == RAW_NODE_EVT_NR ? -1 : evt;
}

static ssize_t features_show(struct kobject *kobj, struct peer_attribute *attr,
			     char *buf)
{
	struct hmdfs_peer *peer = to_peer(kobj);

	return fill_features(buf, peer->features);
}

static ssize_t event_show(struct kobject *kobj, struct peer_attribute *attr,
			  char *buf)
{
	struct hmdfs_peer *peer = to_peer(kobj);

	return snprintf(buf, PAGE_SIZE,
			"cur_async evt %d seq %u\n"
			"cur_sync evt %d seq %u\n"
			"pending evt %d seq %u\n"
			"merged evt %u\n"
			"dup_drop evt %u %u\n"
			"waiting evt %u %u\n"
			"seq_tbl %u %u %u %u\n"
			"seq_rd_idx %u\n"
			"seq_wr_idx %u\n",
			to_sysfs_fmt_evt(peer->cur_evt[0]),
			peer->cur_evt_seq[0],
			to_sysfs_fmt_evt(peer->cur_evt[1]),
			peer->cur_evt_seq[1],
			to_sysfs_fmt_evt(peer->pending_evt),
			peer->pending_evt_seq,
			peer->merged_evt,
			peer->dup_evt[RAW_NODE_EVT_OFF],
			peer->dup_evt[RAW_NODE_EVT_ON],
			peer->waiting_evt[RAW_NODE_EVT_OFF],
			peer->waiting_evt[RAW_NODE_EVT_ON],
			peer->seq_tbl[0], peer->seq_tbl[1], peer->seq_tbl[2],
			peer->seq_tbl[3],
			peer->seq_rd_idx % RAW_NODE_EVT_MAX_NR,
			peer->seq_wr_idx % RAW_NODE_EVT_MAX_NR);
}

static ssize_t stash_show(struct kobject *kobj, struct peer_attribute *attr,
			  char *buf)
{
	struct hmdfs_peer *peer = to_peer(kobj);

	return snprintf(buf, PAGE_SIZE,
			"cur_ok %u\n"
			"cur_nothing %u\n"
			"cur_fail %u\n"
			"total_ok %u\n"
			"total_nothing %u\n"
			"total_fail %u\n"
			"ok_pages %llu\n"
			"fail_pages %llu\n",
			peer->stats.stash.cur_ok,
			peer->stats.stash.cur_nothing,
			peer->stats.stash.cur_fail,
			peer->stats.stash.total_ok,
			peer->stats.stash.total_nothing,
			peer->stats.stash.total_fail,
			peer->stats.stash.ok_pages,
			peer->stats.stash.fail_pages);
}

static ssize_t restore_show(struct kobject *kobj, struct peer_attribute *attr,
			    char *buf)
{
	struct hmdfs_peer *peer = to_peer(kobj);

	return snprintf(buf, PAGE_SIZE,
			"cur_ok %u\n"
			"cur_fail %u\n"
			"cur_keep %u\n"
			"total_ok %u\n"
			"total_fail %u\n"
			"total_keep %u\n"
			"ok_pages %llu\n"
			"fail_pages %llu\n",
			peer->stats.restore.cur_ok,
			peer->stats.restore.cur_fail,
			peer->stats.restore.cur_keep,
			peer->stats.restore.total_ok,
			peer->stats.restore.total_fail,
			peer->stats.restore.total_keep,
			peer->stats.restore.ok_pages,
			peer->stats.restore.fail_pages);
}

static ssize_t rebuild_show(struct kobject *kobj, struct peer_attribute *attr,
			    char *buf)
{
	struct hmdfs_peer *peer = to_peer(kobj);

	return snprintf(buf, PAGE_SIZE,
			"cur_ok %u\n"
			"cur_fail %u\n"
			"cur_invalid %u\n"
			"total_ok %u\n"
			"total_fail %u\n"
			"total_invalid %u\n"
			"time %u\n",
			peer->stats.rebuild.cur_ok,
			peer->stats.rebuild.cur_fail,
			peer->stats.rebuild.cur_invalid,
			peer->stats.rebuild.total_ok,
			peer->stats.rebuild.total_fail,
			peer->stats.rebuild.total_invalid,
			peer->stats.rebuild.time);
}

static struct peer_attribute peer_features_attr = __ATTR_RO(features);
static struct peer_attribute peer_event_attr = __ATTR_RO(event);
static struct peer_attribute peer_stash_attr = __ATTR_RO(stash);
static struct peer_attribute peer_restore_attr = __ATTR_RO(restore);
static struct peer_attribute peer_rebuild_attr = __ATTR_RO(rebuild);

static struct attribute *peer_attrs[] = {
	&peer_features_attr.attr,
	&peer_event_attr.attr,
	&peer_stash_attr.attr,
	&peer_restore_attr.attr,
	&peer_rebuild_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(peer);

static ssize_t peer_attr_show(struct kobject *kobj, struct attribute *attr,
			      char *buf)
{
	struct peer_attribute *peer_attr = to_peer_attr(attr);

	if (!peer_attr->show)
		return -EIO;
	return peer_attr->show(kobj, peer_attr, buf);
}

static ssize_t peer_attr_store(struct kobject *kobj, struct attribute *attr,
			       const char *buf, size_t len)
{
	struct peer_attribute *peer_attr = to_peer_attr(attr);

	if (!peer_attr->store)
		return -EIO;
	return peer_attr->store(kobj, peer_attr, buf, len);
}

static const struct sysfs_ops peer_sysfs_ops = {
	.show = peer_attr_show,
	.store = peer_attr_store,
};

static void peer_sysfs_release(struct kobject *kobj)
{
	struct hmdfs_peer *peer = to_peer(kobj);

	complete(&peer->kobj_unregister);
}

static struct kobj_type peer_ktype = {
	.sysfs_ops = &peer_sysfs_ops,
	.default_groups = peer_groups,
	.release = peer_sysfs_release,
};

int hmdfs_register_peer_sysfs(struct hmdfs_sb_info *sbi,
			      struct hmdfs_peer *peer)
{
	int err = 0;

	init_completion(&peer->kobj_unregister);
	err = kobject_init_and_add(&peer->kobj, &peer_ktype, &sbi->kobj,
				   "peer_%llu", peer->device_id);
	return err;
}

void hmdfs_release_peer_sysfs(struct hmdfs_peer *peer)
{
	kobject_del(&peer->kobj);
	kobject_put(&peer->kobj);
	wait_for_completion(&peer->kobj_unregister);
}

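/*
 * Queue a notify_param for userspace and wake up pollers of the "cmd"
 * attribute via sysfs_notify(); the record is consumed by sbi_cmd_show().
 */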
void notify(struct hmdfs_peer *node, struct notify_param *param)
{
	struct hmdfs_sb_info *sbi = node->sbi;
	int in_len;

	if (!param)
		return;
	spin_lock(&sbi->notify_fifo_lock);
	in_len =
		kfifo_in(&sbi->notify_fifo, param, sizeof(struct notify_param));
	spin_unlock(&sbi->notify_fifo_lock);
	if (in_len != sizeof(struct notify_param))
		return;
	sysfs_notify(&sbi->kobj, NULL, "cmd");
}

int hmdfs_sysfs_init(void)
{
	hmdfs_kset = kset_create_and_add("hmdfs", NULL, fs_kobj);
	if (!hmdfs_kset)
		return -ENOMEM;

	return 0;
}

void hmdfs_sysfs_exit(void)
{
	kset_unregister(hmdfs_kset);
	hmdfs_kset = NULL;
}