// SPDX-License-Identifier: GPL-2.0
/*
 * fs/hmdfs/hmdfs_client.c
 *
 * Copyright (c) 2020-2021 Huawei Device Co., Ltd.
 */

#include "hmdfs_client.h"
#include "hmdfs_server.h"

#include <linux/highmem.h>
#include <linux/sched/signal.h>
#include <linux/statfs.h>

#include "comm/socket_adapter.h"
#include "hmdfs_dentryfile.h"
#include "hmdfs_trace.h"
#include "comm/node_cb.h"
#include "stash.h"
#include "authority/authentication.h"

#define HMDFS_SYNC_WPAGE_RETRY_MS 2000

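/* Free the response buffer attached to a send command and reset its fields. */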
static inline void free_sm_outbuf(struct hmdfs_send_command *sm)
{
	if (sm->out_buf && sm->out_len != 0)
		kfree(sm->out_buf);
	sm->out_len = 0;
	sm->out_buf = NULL;
}

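/*
 * Send an open request for @send_buf to the remote peer and, on success,
 * fill @open_ret with the returned inode number, fid, size and ctimes.
 */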
int hmdfs_send_open(struct hmdfs_peer *con, const char *send_buf,
		    __u8 file_type, struct hmdfs_open_ret *open_ret)
{
	int ret;
	int path_len = strlen(send_buf);
	size_t send_len = sizeof(struct open_request) + path_len + 1;
	struct open_request *open_req = kzalloc(send_len, GFP_KERNEL);
	struct open_response *resp;
	struct hmdfs_send_command sm = {
		.data = open_req,
		.len = send_len,
		.out_buf = NULL,
		.local_filp = NULL,
	};
	hmdfs_init_cmd(&sm.operations, F_OPEN);

	if (!open_req) {
		ret = -ENOMEM;
		goto out;
	}
	open_req->file_type = file_type;
	open_req->path_len = cpu_to_le32(path_len);
	strcpy(open_req->buf, send_buf);
	ret = hmdfs_sendmessage_request(con, &sm);
	kfree(open_req);

	if (!ret && (sm.out_len == 0 || !sm.out_buf))
		ret = -ENOENT;
	if (ret)
		goto out;
	resp = sm.out_buf;

	open_ret->ino = le64_to_cpu(resp->ino);
	open_ret->fid.ver = le64_to_cpu(resp->file_ver);
	open_ret->fid.id = le32_to_cpu(resp->file_id);
	open_ret->file_size = le64_to_cpu(resp->file_size);
	open_ret->remote_ctime.tv_sec = le64_to_cpu(resp->ctime);
	open_ret->remote_ctime.tv_nsec = le32_to_cpu(resp->ctime_nsec);
	open_ret->stable_ctime.tv_sec = le64_to_cpu(resp->stable_ctime);
	open_ret->stable_ctime.tv_nsec = le32_to_cpu(resp->stable_ctime_nsec);

out:
	free_sm_outbuf(&sm);
	return ret;
}

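/*
 * Send a release request for @fid to the remote peer. Close is
 * best-effort: allocation failures and send errors are ignored.
 */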
void hmdfs_send_close(struct hmdfs_peer *con, const struct hmdfs_fid *fid)
{
	size_t send_len = sizeof(struct release_request);
	struct release_request *release_req = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_send_command sm = {
		.data = release_req,
		.len = send_len,
		.local_filp = NULL,
	};
	hmdfs_init_cmd(&sm.operations, F_RELEASE);

	if (!release_req)
		return;

	release_req->file_ver = cpu_to_le64(fid->ver);
	release_req->file_id = cpu_to_le32(fid->id);

	hmdfs_sendmessage_request(con, &sm);
	kfree(release_req);
}

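/*
 * Ask the remote peer to fsync the range [@start, @end] of the file
 * identified by @fid; @datasync mirrors the local fsync semantics.
 */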
int hmdfs_send_fsync(struct hmdfs_peer *con, const struct hmdfs_fid *fid,
		     __s64 start, __s64 end, __s32 datasync)
{
	int ret;
	struct fsync_request *fsync_req =
		kzalloc(sizeof(struct fsync_request), GFP_KERNEL);
	struct hmdfs_send_command sm = {
		.data = fsync_req,
		.len = sizeof(struct fsync_request),
		.out_buf = NULL,
		.local_filp = NULL,
	};

	hmdfs_init_cmd(&sm.operations, F_FSYNC);
	if (!fsync_req)
		return -ENOMEM;

	fsync_req->file_ver = cpu_to_le64(fid->ver);
	fsync_req->file_id = cpu_to_le32(fid->id);
	fsync_req->datasync = cpu_to_le32(datasync);
	fsync_req->start = cpu_to_le64(start);
	fsync_req->end = cpu_to_le64(end);

	ret = hmdfs_sendmessage_request(con, &sm);

	free_sm_outbuf(&sm);
	kfree(fsync_req);
	return ret;
}

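/*
 * Request one page of remote file data. The page is passed as the
 * output buffer of the page request; on allocation failure it must be
 * unlocked here, since no completion callback will run.
 */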
int hmdfs_client_readpage(struct hmdfs_peer *con, const struct hmdfs_fid *fid,
			  struct page *page)
{
	int ret;
	size_t send_len = sizeof(struct readpage_request);
	struct readpage_request *read_data = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_send_command sm = {
		.data = read_data,
		.len = send_len,
		.local_filp = NULL,
	};

	hmdfs_init_cmd(&sm.operations, F_READPAGE);
	if (!read_data) {
		unlock_page(page);
		return -ENOMEM;
	}

	sm.out_buf = page;
	read_data->file_ver = cpu_to_le64(fid->ver);
	read_data->file_id = cpu_to_le32(fid->id);
	read_data->size = cpu_to_le32(HMDFS_PAGE_SIZE);
	read_data->index = cpu_to_le64(page->index);
	ret = hmdfs_sendpage_request(con, &sm);
	kfree(read_data);
	return ret;
}

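/*
 * Check whether @p has a pending SIGINT, SIGTERM or SIGKILL, i.e. the
 * user has asked to cancel the operation.
 */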
bool hmdfs_usr_sig_pending(struct task_struct *p)
{
	sigset_t *sig = &p->pending.signal;

	if (likely(!signal_pending(p)))
		return false;
	return sigismember(sig, SIGINT) || sigismember(sig, SIGTERM) ||
	       sigismember(sig, SIGKILL);
}

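/* Complete a successful remote writepage: mark the page uptodate and clean. */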
void hmdfs_client_writepage_done(struct hmdfs_inode_info *info,
				 struct hmdfs_writepage_context *ctx)
{
	struct page *page = ctx->page;
	bool unlock = ctx->rsem_held;

	SetPageUptodate(page);
	end_page_writeback(page);
	if (unlock)
		up_read(&info->wpage_sem);
	unlock_page(page);
}

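/*
 * Handle a failed remote writepage: either redirty the page for a
 * later retry, or mark the page and mapping with an error, depending
 * on the error code and on whether the writeback may still be redone.
 */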
static void hmdfs_client_writepage_err(struct hmdfs_peer *peer,
				       struct hmdfs_inode_info *info,
				       struct hmdfs_writepage_context *ctx,
				       int err)
{
	struct page *page = ctx->page;
	bool unlock = ctx->rsem_held;

	if (err == -ENOMEM || err == -EAGAIN || err == -ESHUTDOWN ||
	    err == -ETIME)
		SetPageUptodate(page);
	else
		hmdfs_info("Page %ld of file %u writeback err %d devid %llu",
			   page->index, ctx->fid.id, err, peer->device_id);

	/*
	 * The current and subsequent writebacks have been canceled by
	 * the user, leaving these pages in an inconsistent state.
	 * Clear the uptodate flag so that the pages will be re-read
	 * from the remote side in the future.
	 */
	if (ctx->sync_all && hmdfs_usr_sig_pending(ctx->caller))
		ClearPageUptodate(page);

	if (ctx->sync_all || !time_is_after_eq_jiffies(ctx->timeout) ||
	    !(err == -ETIME || hmdfs_need_redirty_page(info, err))) {
		SetPageError(page);
		mapping_set_error(page->mapping, -EIO);
	} else {
		__set_page_dirty_nobuffers(page);
	}

	end_page_writeback(page);
	if (unlock)
		up_read(&info->wpage_sem);
	unlock_page(page);
}

static inline bool
hmdfs_no_timedout_sync_write(struct hmdfs_writepage_context *ctx)
{
	return ctx->sync_all && time_is_after_eq_jiffies(ctx->timeout);
}

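/*
 * A timed-out WB_SYNC_ALL write may be retried as long as it is still
 * within its deadline and the user has not canceled it.
 */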
static inline bool
hmdfs_client_rewrite_for_timeout(struct hmdfs_writepage_context *ctx, int err)
{
	return (err == -ETIME && hmdfs_no_timedout_sync_write(ctx) &&
		!hmdfs_usr_sig_pending(ctx->caller));
}

static inline bool
hmdfs_client_rewrite_for_offline(struct hmdfs_sb_info *sbi,
				 struct hmdfs_writepage_context *ctx, int err)
{
	struct hmdfs_inode_info *info = hmdfs_i(ctx->page->mapping->host);
	unsigned int status = READ_ONCE(info->stash_status);

	/*
	 * No retry if offline occurs during inode restoration.
	 *
	 * Do retry if the local file cache is ready, even if this is
	 * not a WB_SYNC_ALL write. Otherwise a non-WB_SYNC_ALL
	 * writeback would return -EIO and call
	 * mapping_set_error(mapping, -EIO), which in turn would make a
	 * concurrent filemap_write_and_wait() in
	 * hmdfs_flush_stash_file_data() return -EIO.
	 */
	return (hmdfs_is_stash_enabled(sbi) &&
		status != HMDFS_REMOTE_INODE_RESTORING &&
		(hmdfs_no_timedout_sync_write(ctx) ||
		 status == HMDFS_REMOTE_INODE_STASHING) &&
		hmdfs_is_offline_or_timeout_err(err));
}

static inline bool
hmdfs_client_redo_writepage(struct hmdfs_sb_info *sbi,
			    struct hmdfs_writepage_context *ctx, int err)
{
	return hmdfs_client_rewrite_for_timeout(ctx, err) ||
	       hmdfs_client_rewrite_for_offline(sbi, ctx, err);
}

static bool hmdfs_remote_write_to_remote(struct hmdfs_inode_info *info)
{
	unsigned int status = READ_ONCE(info->stash_status);
	bool stashing;

	if (status != HMDFS_REMOTE_INODE_STASHING)
		return true;

	/* Ensure it's OK to use info->cache afterwards */
	spin_lock(&info->stash_lock);
	stashing = (info->stash_status == HMDFS_REMOTE_INODE_STASHING);
	spin_unlock(&info->stash_lock);

	return !stashing;
}

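/*
 * Write one page either to the remote peer or, while the inode is
 * being stashed, to the local stash file. If a remote write fails
 * with an error that qualifies for an offline rewrite, queue a
 * delayed retry instead of reporting the failure.
 */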
int hmdfs_remote_do_writepage(struct hmdfs_peer *con,
			      struct hmdfs_writepage_context *ctx)
{
	struct hmdfs_inode_info *info = hmdfs_i(ctx->page->mapping->host);
	bool to_remote = false;
	int err = 0;

	to_remote = hmdfs_remote_write_to_remote(info);
	if (to_remote)
		err = hmdfs_client_writepage(info->conn, ctx);
	else
		err = hmdfs_stash_writepage(info->conn, ctx);
	if (!err)
		return 0;

	if (!(to_remote &&
	      hmdfs_client_rewrite_for_offline(con->sbi, ctx, err)))
		return err;

	queue_delayed_work(con->retry_wb_wq, &ctx->retry_dwork,
			   msecs_to_jiffies(HMDFS_SYNC_WPAGE_RETRY_MS));

	return 0;
}

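/*
 * Delayed-work handler that retries a failed writepage under the
 * superblock credentials; on a definitive failure the page error path
 * runs and the context is freed.
 */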
void hmdfs_remote_writepage_retry(struct work_struct *work)
{
	struct hmdfs_writepage_context *ctx =
		container_of(work, struct hmdfs_writepage_context,
			     retry_dwork.work);
	struct hmdfs_inode_info *info = hmdfs_i(ctx->page->mapping->host);
	struct hmdfs_peer *peer = info->conn;
	const struct cred *old_cred = NULL;
	int err;

	old_cred = hmdfs_override_creds(peer->sbi->cred);
	err = hmdfs_remote_do_writepage(peer, ctx);
	hmdfs_revert_creds(old_cred);
	if (err) {
		hmdfs_client_writepage_err(peer, info, ctx, err);
		put_task_struct(ctx->caller);
		kfree(ctx);
	}
}

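/*
 * Async-response callback for F_WRITEPAGE. On success the page is
 * completed; on a redoable error the write is re-issued and only the
 * request data is freed, keeping the context alive for the retry;
 * otherwise the error path runs and everything is freed.
 */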
void hmdfs_writepage_cb(struct hmdfs_peer *peer, const struct hmdfs_req *req,
			const struct hmdfs_resp *resp)
{
	struct hmdfs_writepage_context *ctx = req->private;
	struct hmdfs_inode_info *info = hmdfs_i(ctx->page->mapping->host);
	int ret = resp->ret_code;
	unsigned long page_index = ctx->page->index;

	trace_hmdfs_writepage_cb_enter(peer, info->remote_ino, page_index, ret);

	if (!ret) {
		hmdfs_client_writepage_done(info, ctx);
		atomic64_inc(&info->write_counter);
		goto cleanup_all;
	}

	if (hmdfs_client_redo_writepage(peer->sbi, ctx, ret)) {
		ret = hmdfs_remote_do_writepage(peer, ctx);
		if (!ret)
			goto cleanup_req;
		WARN_ON(ret == -ETIME);
	}

	hmdfs_client_writepage_err(peer, info, ctx, ret);

cleanup_all:
	put_task_struct(ctx->caller);
	kfree(ctx);
cleanup_req:
	kfree(req->data);

	trace_hmdfs_writepage_cb_exit(peer, info->remote_ino, page_index, ret);
}

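/*
 * Copy one locked page under writeback into a request buffer and send
 * it to the remote peer as an async F_WRITEPAGE request.
 */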
int hmdfs_client_writepage(struct hmdfs_peer *con,
			   struct hmdfs_writepage_context *param)
{
	int ret = 0;
	size_t send_len = sizeof(struct writepage_request) + HMDFS_PAGE_SIZE;
	struct writepage_request *write_data = kzalloc(send_len, GFP_NOFS);
	struct hmdfs_req req;
	char *data = NULL;

	if (unlikely(!write_data))
		return -ENOMEM;

	WARN_ON(!PageLocked(param->page)); // VFS
	WARN_ON(PageDirty(param->page)); // VFS
	WARN_ON(!PageWriteback(param->page)); // hmdfs

	write_data->file_ver = cpu_to_le64(param->fid.ver);
	write_data->file_id = cpu_to_le32(param->fid.id);
	write_data->index = cpu_to_le64(param->page->index);
	write_data->count = cpu_to_le32(param->count);
	data = kmap(param->page);
	memcpy((char *)write_data->buf, data, HMDFS_PAGE_SIZE);
	kunmap(param->page);
	req.data = write_data;
	req.data_len = send_len;

	req.private = param;
	req.private_len = sizeof(*param);

	req.timeout = TIMEOUT_CONFIG;
	hmdfs_init_cmd(&req.operations, F_WRITEPAGE);
	ret = hmdfs_send_async_request(con, &req);
	if (unlikely(ret))
		kfree(write_data);
	return ret;
}

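/*
 * Completion handler for a readpage response: mark the page uptodate
 * on success, or flag the fid for reopen if the remote fd was stale.
 */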
void hmdfs_client_recv_readpage(struct hmdfs_head_cmd *head, int err,
				struct hmdfs_async_work *async_work)
{
	struct page *page = async_work->page;
	int ret = le32_to_cpu(head->ret_code);
	struct hmdfs_inode_info *info = hmdfs_i(page->mapping->host);
	unsigned long page_index = page->index;

	if (!err)
		SetPageUptodate(page);
	else if (err == -EBADF)
		/* The fd may be stale due to the fid version; reopen it */
		set_bit(HMDFS_FID_NEED_OPEN, &info->fid_flags);

	hmdfs_client_resp_statis(async_work->head.peer->sbi, F_READPAGE,
				 HMDFS_RESP_NORMAL, async_work->start, jiffies);

	trace_hmdfs_client_recv_readpage(async_work->head.peer,
					 info->remote_ino, page_index, ret);

	asw_done(async_work);
}

/* Read the cached dentry file at @path and write it into @filp */
int hmdfs_client_start_readdir(struct hmdfs_peer *con, struct file *filp,
			       const char *path, int path_len,
			       struct hmdfs_dcache_header *header)
{
	int ret;
	size_t send_len = sizeof(struct readdir_request) + path_len + 1;
	struct readdir_request *req = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_send_command sm = {
		.data = req,
		.len = send_len,
		.local_filp = filp,
	};

	hmdfs_init_cmd(&sm.operations, F_ITERATE);
	if (!req)
		return -ENOMEM;

	/* Add a ref, or the file will be released at msg put */
	get_file(sm.local_filp);
	req->path_len = cpu_to_le32(path_len);
	strncpy(req->path, path, path_len);

	/*
	 * If we already have a cache file, verify it. If it is
	 * up to date, we don't have to transfer a new one.
	 */
	if (header) {
		req->dcache_crtime = header->dcache_crtime;
		req->dcache_crtime_nsec = header->dcache_crtime_nsec;
		req->dentry_ctime = header->dentry_ctime;
		req->dentry_ctime_nsec = header->dentry_ctime_nsec;
		req->num = header->num;
		req->verify_cache = cpu_to_le32(1);
	}

	ret = hmdfs_sendmessage_request(con, &sm);
	kfree(req);
	return ret;
}

int hmdfs_client_start_mkdir(struct hmdfs_peer *con,
			     const char *path, const char *name,
			     umode_t mode, struct hmdfs_lookup_ret *mkdir_ret)
{
	int ret = 0;
	int path_len = strlen(path);
	int name_len = strlen(name);
	size_t send_len = sizeof(struct mkdir_request) + path_len + 1 +
			  name_len + 1;
	struct mkdir_request *mkdir_req = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_inodeinfo_response *resp = NULL;
	struct hmdfs_send_command sm = {
		.data = mkdir_req,
		.len = send_len,
		.out_buf = NULL,
		.local_filp = NULL,
	};

	hmdfs_init_cmd(&sm.operations, F_MKDIR);
	if (!mkdir_req)
		return -ENOMEM;

	mkdir_req->path_len = cpu_to_le32(path_len);
	mkdir_req->name_len = cpu_to_le32(name_len);
	mkdir_req->mode = cpu_to_le16(mode);
	strncpy(mkdir_req->path, path, path_len);
	strncpy(mkdir_req->path + path_len + 1, name, name_len);

	ret = hmdfs_sendmessage_request(con, &sm);
	if (ret == -ENOENT || ret == -ETIME || ret == -EOPNOTSUPP)
		goto out;
	if (!sm.out_buf) {
		ret = -ENOENT;
		goto out;
	}
	resp = sm.out_buf;
	mkdir_ret->i_mode = le16_to_cpu(resp->i_mode);
	mkdir_ret->i_size = le64_to_cpu(resp->i_size);
	mkdir_ret->i_mtime = le64_to_cpu(resp->i_mtime);
	mkdir_ret->i_mtime_nsec = le32_to_cpu(resp->i_mtime_nsec);
	mkdir_ret->i_ino = le64_to_cpu(resp->i_ino);

out:
	free_sm_outbuf(&sm);
	kfree(mkdir_req);
	return ret;
}

int hmdfs_client_start_create(struct hmdfs_peer *con,
			      const char *path, const char *name,
			      umode_t mode, bool want_excl,
			      struct hmdfs_lookup_ret *create_ret)
{
	int ret = 0;
	int path_len = strlen(path);
	int name_len = strlen(name);
	size_t send_len = sizeof(struct create_request) + path_len + 1 +
			  name_len + 1;
	struct create_request *create_req = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_inodeinfo_response *resp = NULL;
	struct hmdfs_send_command sm = {
		.data = create_req,
		.len = send_len,
		.out_buf = NULL,
		.local_filp = NULL,
	};

	hmdfs_init_cmd(&sm.operations, F_CREATE);
	if (!create_req)
		return -ENOMEM;

	create_req->path_len = cpu_to_le32(path_len);
	create_req->name_len = cpu_to_le32(name_len);
	create_req->mode = cpu_to_le16(mode);
	create_req->want_excl = want_excl;
	strncpy(create_req->path, path, path_len);
	strncpy(create_req->path + path_len + 1, name, name_len);

	ret = hmdfs_sendmessage_request(con, &sm);
	if (ret == -ENOENT || ret == -ETIME || ret == -EOPNOTSUPP)
		goto out;
	if (!sm.out_buf) {
		ret = -ENOENT;
		goto out;
	}
	resp = sm.out_buf;
	create_ret->i_mode = le16_to_cpu(resp->i_mode);
	create_ret->i_size = le64_to_cpu(resp->i_size);
	create_ret->i_mtime = le64_to_cpu(resp->i_mtime);
	create_ret->i_mtime_nsec = le32_to_cpu(resp->i_mtime_nsec);
	create_ret->i_ino = le64_to_cpu(resp->i_ino);

out:
	free_sm_outbuf(&sm);
	kfree(create_req);
	return ret;
}

int hmdfs_client_start_rmdir(struct hmdfs_peer *con, const char *path,
			     const char *name)
{
	int ret;
	int path_len = strlen(path);
	int name_len = strlen(name);
	size_t send_len = sizeof(struct rmdir_request) + path_len + 1 +
			  name_len + 1;
	struct rmdir_request *rmdir_req = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_send_command sm = {
		.data = rmdir_req,
		.len = send_len,
		.out_buf = NULL,
		.local_filp = NULL,
	};

	hmdfs_init_cmd(&sm.operations, F_RMDIR);
	if (!rmdir_req)
		return -ENOMEM;

	rmdir_req->path_len = cpu_to_le32(path_len);
	rmdir_req->name_len = cpu_to_le32(name_len);
	strncpy(rmdir_req->path, path, path_len);
	strncpy(rmdir_req->path + path_len + 1, name, name_len);

	ret = hmdfs_sendmessage_request(con, &sm);
	free_sm_outbuf(&sm);
	kfree(rmdir_req);
	return ret;
}

int hmdfs_client_start_unlink(struct hmdfs_peer *con, const char *path,
			      const char *name)
{
	int ret;
	int path_len = strlen(path);
	int name_len = strlen(name);
	size_t send_len = sizeof(struct unlink_request) + path_len + 1 +
			  name_len + 1;
	struct unlink_request *unlink_req = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_send_command sm = {
		.data = unlink_req,
		.len = send_len,
		.out_buf = NULL,
		.local_filp = NULL,
	};

	hmdfs_init_cmd(&sm.operations, F_UNLINK);
	if (!unlink_req)
		return -ENOMEM;

	unlink_req->path_len = cpu_to_le32(path_len);
	unlink_req->name_len = cpu_to_le32(name_len);
	strncpy(unlink_req->path, path, path_len);
	strncpy(unlink_req->path + path_len + 1, name, name_len);

	ret = hmdfs_sendmessage_request(con, &sm);
	kfree(unlink_req);
	free_sm_outbuf(&sm);
	return ret;
}

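/*
 * The rename request packs four NUL-terminated strings into one
 * buffer, in the order: old path, new path, old name, new name.
 */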
int hmdfs_client_start_rename(struct hmdfs_peer *con, const char *old_path,
			      const char *old_name, const char *new_path,
			      const char *new_name, unsigned int flags)
{
	int ret;
	int old_path_len = strlen(old_path);
	int new_path_len = strlen(new_path);
	int old_name_len = strlen(old_name);
	int new_name_len = strlen(new_name);

	size_t send_len = sizeof(struct rename_request) + old_path_len + 1 +
			  new_path_len + 1 + old_name_len + 1 + new_name_len +
			  1;
	struct rename_request *rename_req = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_send_command sm = {
		.data = rename_req,
		.len = send_len,
		.out_buf = NULL,
		.local_filp = NULL,
	};

	hmdfs_init_cmd(&sm.operations, F_RENAME);
	if (!rename_req)
		return -ENOMEM;

	rename_req->old_path_len = cpu_to_le32(old_path_len);
	rename_req->new_path_len = cpu_to_le32(new_path_len);
	rename_req->old_name_len = cpu_to_le32(old_name_len);
	rename_req->new_name_len = cpu_to_le32(new_name_len);
	rename_req->flags = cpu_to_le32(flags);

	strncpy(rename_req->path, old_path, old_path_len);
	strncpy(rename_req->path + old_path_len + 1, new_path, new_path_len);

	strncpy(rename_req->path + old_path_len + 1 + new_path_len + 1,
		old_name, old_name_len);
	strncpy(rename_req->path + old_path_len + 1 + new_path_len + 1 +
			old_name_len + 1,
		new_name, new_name_len);

	ret = hmdfs_sendmessage_request(con, &sm);
	free_sm_outbuf(&sm);
	kfree(rename_req);
	return ret;
}

int hmdfs_send_setattr(struct hmdfs_peer *con, const char *send_buf,
		       struct setattr_info *attr_info)
{
	int ret;
	int path_len = strlen(send_buf);
	size_t send_len = path_len + 1 + sizeof(struct setattr_request);
	struct setattr_request *setattr_req = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_send_command sm = {
		.data = setattr_req,
		.len = send_len,
		.local_filp = NULL,
	};

	hmdfs_init_cmd(&sm.operations, F_SETATTR);
	if (!setattr_req)
		return -ENOMEM;

	strcpy(setattr_req->buf, send_buf);
	setattr_req->path_len = cpu_to_le32(path_len);
	setattr_req->valid = cpu_to_le32(attr_info->valid);
	setattr_req->size = cpu_to_le64(attr_info->size);
	setattr_req->mtime = cpu_to_le64(attr_info->mtime);
	setattr_req->mtime_nsec = cpu_to_le32(attr_info->mtime_nsec);
	ret = hmdfs_sendmessage_request(con, &sm);
	kfree(setattr_req);
	return ret;
}

static void hmdfs_update_getattr_ret(struct getattr_response *resp,
				     struct hmdfs_getattr_ret *result)
{
	struct kstat *stat = &result->stat;

	stat->result_mask = le32_to_cpu(resp->result_mask);
	if (stat->result_mask == 0)
		return;

	stat->ino = le64_to_cpu(resp->ino);
	stat->mode = le16_to_cpu(resp->mode);
	stat->nlink = le32_to_cpu(resp->nlink);
	stat->uid.val = le32_to_cpu(resp->uid);
	stat->gid.val = le32_to_cpu(resp->gid);
	stat->size = le64_to_cpu(resp->size);
	stat->blocks = le64_to_cpu(resp->blocks);
	stat->blksize = le32_to_cpu(resp->blksize);
	stat->atime.tv_sec = le64_to_cpu(resp->atime);
	stat->atime.tv_nsec = le32_to_cpu(resp->atime_nsec);
	stat->mtime.tv_sec = le64_to_cpu(resp->mtime);
	stat->mtime.tv_nsec = le32_to_cpu(resp->mtime_nsec);
	stat->ctime.tv_sec = le64_to_cpu(resp->ctime);
	stat->ctime.tv_nsec = le32_to_cpu(resp->ctime_nsec);
	stat->btime.tv_sec = le64_to_cpu(resp->crtime);
	stat->btime.tv_nsec = le32_to_cpu(resp->crtime_nsec);
	result->fsid = le64_to_cpu(resp->fsid);
	/* currently not used */
	result->i_flags = 0;
}

int hmdfs_send_getattr(struct hmdfs_peer *con, const char *send_buf,
		       unsigned int lookup_flags,
		       struct hmdfs_getattr_ret *result)
{
	int path_len = strlen(send_buf);
	size_t send_len = path_len + 1 + sizeof(struct getattr_request);
	int ret = 0;
	struct getattr_request *req = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_send_command sm = {
		.data = req,
		.len = send_len,
		.out_buf = NULL,
		.local_filp = NULL,
	};

	hmdfs_init_cmd(&sm.operations, F_GETATTR);
	if (!req)
		return -ENOMEM;

	req->path_len = cpu_to_le32(path_len);
	req->lookup_flags = cpu_to_le32(lookup_flags);
	strncpy(req->buf, send_buf, path_len);
	ret = hmdfs_sendmessage_request(con, &sm);
	if (!ret && (sm.out_len == 0 || !sm.out_buf))
		ret = -ENOENT;
	if (ret)
		goto out;

	hmdfs_update_getattr_ret(sm.out_buf, result);

out:
	kfree(req);
	free_sm_outbuf(&sm);
	return ret;
}

static void hmdfs_update_statfs_ret(struct statfs_response *resp,
				    struct kstatfs *buf)
{
	buf->f_type = le64_to_cpu(resp->f_type);
	buf->f_bsize = le64_to_cpu(resp->f_bsize);
	buf->f_blocks = le64_to_cpu(resp->f_blocks);
	buf->f_bfree = le64_to_cpu(resp->f_bfree);
	buf->f_bavail = le64_to_cpu(resp->f_bavail);
	buf->f_files = le64_to_cpu(resp->f_files);
	buf->f_ffree = le64_to_cpu(resp->f_ffree);
	buf->f_fsid.val[0] = le32_to_cpu(resp->f_fsid_0);
	buf->f_fsid.val[1] = le32_to_cpu(resp->f_fsid_1);
	buf->f_namelen = le64_to_cpu(resp->f_namelen);
	buf->f_frsize = le64_to_cpu(resp->f_frsize);
	buf->f_flags = le64_to_cpu(resp->f_flags);
	buf->f_spare[0] = le64_to_cpu(resp->f_spare_0);
	buf->f_spare[1] = le64_to_cpu(resp->f_spare_1);
	buf->f_spare[2] = le64_to_cpu(resp->f_spare_2);
	buf->f_spare[3] = le64_to_cpu(resp->f_spare_3);
}

int hmdfs_send_statfs(struct hmdfs_peer *con, const char *path,
		      struct kstatfs *buf)
{
	int ret;
	int path_len = strlen(path);
	size_t send_len = sizeof(struct statfs_request) + path_len + 1;
	struct statfs_request *req = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_send_command sm = {
		.data = req,
		.len = send_len,
		.out_buf = NULL,
		.local_filp = NULL,
	};

	hmdfs_init_cmd(&sm.operations, F_STATFS);
	if (!req)
		return -ENOMEM;

	req->path_len = cpu_to_le32(path_len);
	strncpy(req->path, path, path_len);

	ret = hmdfs_sendmessage_request(con, &sm);

	if (ret == -ETIME)
		ret = -EIO;
	if (!ret && (sm.out_len == 0 || !sm.out_buf))
		ret = -ENOENT;
	if (ret)
		goto out;

	hmdfs_update_statfs_ret(sm.out_buf, buf);
out:
	kfree(req);
	free_sm_outbuf(&sm);
	return ret;
}

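/*
 * Send an async syncfs request carrying the current syncfs version;
 * the response is matched against this version in
 * hmdfs_recv_syncfs_cb() so that stale replies are ignored.
 */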
int hmdfs_send_syncfs(struct hmdfs_peer *con, int syncfs_timeout)
{
	int ret;
	struct hmdfs_req req;
	struct hmdfs_sb_info *sbi = con->sbi;
	struct syncfs_request *syncfs_req =
		kzalloc(sizeof(struct syncfs_request), GFP_KERNEL);

	if (!syncfs_req) {
		hmdfs_err("cannot allocate syncfs_request");
		return -ENOMEM;
	}

	hmdfs_init_cmd(&req.operations, F_SYNCFS);
	req.timeout = syncfs_timeout;

	syncfs_req->version = cpu_to_le64(sbi->hsi.version);
	req.data = syncfs_req;
	req.data_len = sizeof(*syncfs_req);

	ret = hmdfs_send_async_request(con, &req);
	if (ret) {
		kfree(syncfs_req);
		hmdfs_err("ret fail with %d", ret);
	}

	return ret;
}

static void hmdfs_update_getxattr_ret(struct getxattr_response *resp,
				      void *value, size_t o_size, int *ret)
{
	ssize_t size = le32_to_cpu(resp->size);

	if (o_size && o_size < size) {
		*ret = -ERANGE;
		return;
	}

	if (o_size)
		memcpy(value, resp->value, size);

	*ret = size;
}

int hmdfs_send_getxattr(struct hmdfs_peer *con, const char *send_buf,
			const char *name, void *value, size_t size)
{
	size_t path_len = strlen(send_buf);
	size_t name_len = strlen(name);
	size_t send_len = path_len + name_len +
			  sizeof(struct getxattr_request) + 2;
	int ret = 0;
	struct getxattr_request *req = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_send_command sm = {
		.data = req,
		.len = send_len,
		.out_buf = NULL,
		.local_filp = NULL,
	};

	hmdfs_init_cmd(&sm.operations, F_GETXATTR);
	if (!req)
		return -ENOMEM;

	req->path_len = cpu_to_le32(path_len);
	req->name_len = cpu_to_le32(name_len);
	req->size = cpu_to_le32(size);
	strncpy(req->buf, send_buf, path_len);
	strncpy(req->buf + path_len + 1, name, name_len);
	ret = hmdfs_sendmessage_request(con, &sm);
	if (!ret && (sm.out_len == 0 || !sm.out_buf))
		ret = -ENOENT;
	if (ret)
		goto out;

	hmdfs_update_getxattr_ret(sm.out_buf, value, size, &ret);

out:
	kfree(req);
	free_sm_outbuf(&sm);
	return ret;
}

int hmdfs_send_setxattr(struct hmdfs_peer *con, const char *send_buf,
			const char *name, const void *value,
			size_t size, int flags)
{
	size_t path_len = strlen(send_buf);
	size_t name_len = strlen(name);
	size_t send_len = path_len + name_len + size + 2 +
			  sizeof(struct setxattr_request);
	int ret = 0;
	struct setxattr_request *req = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_send_command sm = {
		.data = req,
		.len = send_len,
		.local_filp = NULL,
	};

	hmdfs_init_cmd(&sm.operations, F_SETXATTR);
	if (!req)
		return -ENOMEM;

	req->path_len = cpu_to_le32(path_len);
	req->name_len = cpu_to_le32(name_len);
	req->size = cpu_to_le32(size);
	req->flags = cpu_to_le32(flags);
	strncpy(req->buf, send_buf, path_len);
	strncpy(req->buf + path_len + 1, name, name_len);
	if (!value)
		req->del = true;
	else
		memcpy(req->buf + path_len + name_len + 2, value, size);

	ret = hmdfs_sendmessage_request(con, &sm);
	kfree(req);
	return ret;
}

static void hmdfs_update_listxattr_ret(struct listxattr_response *resp,
				       char *list, size_t o_size, ssize_t *ret)
{
	ssize_t size = le32_to_cpu(resp->size);

	if (o_size && o_size < size) {
		*ret = -ERANGE;
		return;
	}

	/* Names are separated by '\0', so copy with memcpy */
	if (o_size)
		memcpy(list, resp->list, size);

	*ret = size;
}

ssize_t hmdfs_send_listxattr(struct hmdfs_peer *con, const char *send_buf,
			     char *list, size_t size)
{
	size_t path_len = strlen(send_buf);
	size_t send_len = path_len + 1 + sizeof(struct listxattr_request);
	ssize_t ret = 0;
	struct listxattr_request *req = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_send_command sm = {
		.data = req,
		.len = send_len,
		.out_buf = NULL,
		.local_filp = NULL,
	};

	hmdfs_init_cmd(&sm.operations, F_LISTXATTR);
	if (!req)
		return -ENOMEM;

	req->path_len = cpu_to_le32(path_len);
	req->size = cpu_to_le32(size);
	strncpy(req->buf, send_buf, path_len);
	ret = hmdfs_sendmessage_request(con, &sm);
	if (!ret && (sm.out_len == 0 || !sm.out_buf))
		ret = -ENOENT;
	if (ret)
		goto out;

	hmdfs_update_listxattr_ret(sm.out_buf, list, size, &ret);

out:
	kfree(req);
	free_sm_outbuf(&sm);
	return ret;
}

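/*
 * Callback for a syncfs response: drop stale replies whose version no
 * longer matches sbi->hsi.version, record the first non-zero remote
 * return code, then decrement the wait count and wake up the waiter.
 */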
void hmdfs_recv_syncfs_cb(struct hmdfs_peer *peer, const struct hmdfs_req *req,
			  const struct hmdfs_resp *resp)
{
	struct hmdfs_sb_info *sbi = peer->sbi;
	struct syncfs_request *syncfs_req = (struct syncfs_request *)req->data;

	WARN_ON(!syncfs_req);
	spin_lock(&sbi->hsi.v_lock);
	if (le64_to_cpu(syncfs_req->version) != sbi->hsi.version) {
		hmdfs_info(
			"Recv stale syncfs resp[ver: %llu] from device %llu, current ver %llu",
			le64_to_cpu(syncfs_req->version), peer->device_id,
			sbi->hsi.version);
		spin_unlock(&sbi->hsi.v_lock);
		goto out;
	}

	if (!sbi->hsi.remote_ret)
		sbi->hsi.remote_ret = resp->ret_code;

	if (resp->ret_code) {
		hmdfs_err("Recv syncfs error code %d from device %llu",
			  resp->ret_code, peer->device_id);
	} else {
		/*
		 * Set @sb_dirty_count to zero if no one else produced
		 * dirty data on the remote server during the remote
		 * sync.
		 */
		atomic64_cmpxchg(&peer->sb_dirty_count,
				 peer->old_sb_dirty_count, 0);
	}

	atomic_dec(&sbi->hsi.wait_count);
	spin_unlock(&sbi->hsi.v_lock);
	wake_up_interruptible(&sbi->hsi.wq);

out:
	kfree(syncfs_req);
}

void hmdfs_send_drop_push(struct hmdfs_peer *con, const char *path)
{
	int path_len = strlen(path);
	size_t send_len = sizeof(struct drop_push_request) + path_len + 1;
	struct drop_push_request *dp_req = kzalloc(send_len, GFP_KERNEL);
	struct hmdfs_send_command sm = {
		.data = dp_req,
		.len = send_len,
		.local_filp = NULL,
	};

	hmdfs_init_cmd(&sm.operations, F_DROP_PUSH);
	if (!dp_req)
		return;

	dp_req->path_len = cpu_to_le32(path_len);
	strncpy(dp_req->path, path, path_len);

	hmdfs_sendmessage_request(con, &sm);
	kfree(dp_req);
}

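/*
 * Look up the next in-flight message in the peer's idr, starting at
 * *@id, and take a reference on it under the idr lock.
 */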
static void *hmdfs_get_msg_next(struct hmdfs_peer *peer, int *id)
{
	struct hmdfs_msg_idr_head *head = NULL;

	spin_lock(&peer->idr_lock);
	head = idr_get_next(&peer->msg_idr, id);
	if (head && head->type < MSG_IDR_MAX && head->type >= 0)
		kref_get(&head->ref);

	spin_unlock(&peer->idr_lock);

	return head;
}

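/*
 * Offline event handler: walk all in-flight messages of @conn and
 * wake up their waiters (sync requests complete with -ETIME) so that
 * nothing stays blocked on a disconnected peer.
 */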
void hmdfs_client_offline_notify(struct hmdfs_peer *conn, int evt,
				 unsigned int seq)
{
	int id;
	int count = 0;
	struct hmdfs_msg_idr_head *head = NULL;

	for (id = 0; (head = hmdfs_get_msg_next(conn, &id)) != NULL; ++id) {
		switch (head->type) {
		case MSG_IDR_1_0_NONE:
			head_put(head);
			head_put(head);
			break;
		case MSG_IDR_MESSAGE_SYNC:
		case MSG_IDR_1_0_MESSAGE_SYNC:
			hmdfs_response_wakeup((struct sendmsg_wait_queue *)head,
					      -ETIME, 0, NULL);
			hmdfs_debug("wakeup id=%d", head->msg_id);
			msg_put((struct sendmsg_wait_queue *)head);
			break;
		case MSG_IDR_MESSAGE_ASYNC:
			hmdfs_wakeup_parasite(
				(struct hmdfs_msg_parasite *)head);
			hmdfs_debug("wakeup parasite id=%d", head->msg_id);
			mp_put((struct hmdfs_msg_parasite *)head);
			break;
		case MSG_IDR_PAGE:
		case MSG_IDR_1_0_PAGE:
			hmdfs_wakeup_async_work(
				(struct hmdfs_async_work *)head);
			hmdfs_debug("wakeup async work id=%d", head->msg_id);
			asw_put((struct hmdfs_async_work *)head);
			break;
		default:
			hmdfs_err("Bad type=%d id=%d", head->type,
				  head->msg_id);
			break;
		}

		count++;
		/*
		 * If there are too many idr entries to process,
		 * reschedule every HMDFS_IDR_RESCHED_COUNT (512)
		 * messages to avoid a soft lockup.
		 */
		if (count % HMDFS_IDR_RESCHED_COUNT == 0)
			cond_resched();
	}
}

static struct hmdfs_node_cb_desc client_cb[] = {
	{
		.evt = NODE_EVT_OFFLINE,
		.sync = true,
		.fn = hmdfs_client_offline_notify,
	},
};

void __init hmdfs_client_add_node_evt_cb(void)
{
	hmdfs_node_add_evt_cb(client_cb, ARRAY_SIZE(client_cb));
}