// SPDX-License-Identifier: GPL-2.0
/*
 * fs/hmdfs/file_cloud.c
 *
 * Copyright (c) 2023-2023 Huawei Device Co., Ltd.
 */

#include <linux/backing-dev.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/page-flags.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include "file_remote.h"

#include "comm/socket_adapter.h"
#include "hmdfs.h"
#include "hmdfs_client.h"
#include "hmdfs_dentryfile.h"
#include "hmdfs_dentryfile_cloud.h"
#include "hmdfs_trace.h"

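/*
 * Read-only VM operations for cloud files: faults are served from the
 * page cache and page_mkwrite is intentionally absent, so shared
 * writable mappings are not supported.
 */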
static const struct vm_operations_struct hmdfs_cloud_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = NULL,
};

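/*
 * Open the backing file that mirrors this dentry under sbi->cloud_dir.
 * The lower file is opened O_DIRECT relative to the cloud root and
 * stashed in file->private_data for later I/O.
 */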
int hmdfs_file_open_cloud(struct inode *inode, struct file *file)
{
	const char *dir_path;
	struct hmdfs_sb_info *sbi = inode->i_sb->s_fs_info;
	struct path root_path;
	struct file *lower_file;
	int err = 0;

	struct hmdfs_file_info *gfi = kzalloc(sizeof(*gfi), GFP_KERNEL);
	if (!gfi)
		return -ENOMEM;

	if (!sbi->cloud_dir) {
		hmdfs_info("no cloud_dir");
		kfree(gfi);
		return -EPERM;
	}

	err = kern_path(sbi->cloud_dir, 0, &root_path);
	if (err) {
		hmdfs_info("kern_path failed: %d", err);
		kfree(gfi);
		return err;
	}

	dir_path = hmdfs_get_dentry_relative_path(file->f_path.dentry);
	if (!dir_path) {
		hmdfs_err("get cloud path failed");
		/* drop the reference taken by kern_path() */
		path_put(&root_path);
		kfree(gfi);
		return -ENOENT;
	}

	lower_file = file_open_root(&root_path, dir_path,
			      file->f_flags | O_DIRECT, file->f_mode);
	path_put(&root_path);
	if (IS_ERR(lower_file)) {
		hmdfs_info("file_open_root failed: %ld", PTR_ERR(lower_file));
		err = PTR_ERR(lower_file);
		kfree(gfi);
	} else {
		gfi->lower_file = lower_file;
		file->private_data = gfi;
	}
	kfree(dir_path);
	return err;
}

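/*
 * Release the lower file reference taken at open time and free the
 * per-file info.
 */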
int hmdfs_file_release_cloud(struct inode *inode, struct file *file)
{
	struct hmdfs_file_info *gfi = hmdfs_f(file);

	file->private_data = NULL;
	fput(gfi->lower_file);
	kfree(gfi);
	return 0;
}

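/*
 * Delegate ->flush() to the lower file so the close(2) semantics of
 * the backing filesystem are preserved.
 */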
static int hmdfs_file_flush_cloud(struct file *file, fl_owner_t id)
{
	struct hmdfs_file_info *gfi = hmdfs_f(file);

	if (!gfi || !gfi->lower_file)
		return 0;

	if (gfi->lower_file->f_op->flush)
		return gfi->lower_file->f_op->flush(gfi->lower_file, id);
	return 0;
}

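/*
 * Map the lower file instead of the hmdfs file, mirroring the stacking
 * trick used by overlayfs: vma->vm_file is switched to the backing file
 * before calling its ->mmap(), and the reference the vma used to hold
 * on the hmdfs file is dropped on success.
 */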
int hmdfs_file_mmap_cloud(struct file *file, struct vm_area_struct *vma)
{
	struct hmdfs_file_info *private_data = file->private_data;
	struct file *realfile = NULL;
	int ret;

	if (!private_data)
		return -EINVAL;

	realfile = private_data->lower_file;
	if (!realfile)
		return -EINVAL;

	if (!realfile->f_op->mmap)
		return -ENODEV;

	if (WARN_ON(file != vma->vm_file))
		return -EIO;

	vma->vm_file = get_file(realfile);
	ret = call_mmap(vma->vm_file, vma);
	if (ret)
		fput(realfile);
	else
		fput(file);

	file_accessed(file);

	return ret;
}

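/*
 * Read a batch of contiguous, locked page-cache pages from the lower
 * file in one kernel_read() by vmapping them into a single buffer.
 * On success the pages are marked uptodate and unlocked; on failure
 * they are removed from the page cache again.
 */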
static int hmdfs_do_readpages_cloud(struct file *filp, int cnt,
				    struct page **vec)
{
	struct hmdfs_file_info *gfi = filp->private_data;
	struct file *lower_filp;
	loff_t pos = (loff_t)(vec[0]->index) << HMDFS_PAGE_OFFSET;
	void *pages_buf = NULL;
	int idx, ret;

	if (!gfi) {
		ret = -EINVAL;
		goto out_err;
	}
	lower_filp = gfi->lower_file;

	pages_buf = vmap(vec, cnt, VM_MAP, PAGE_KERNEL);
	if (!pages_buf) {
		ret = -ENOMEM;
		goto out_err;
	}

	trace_hmdfs_do_readpages_cloud_begin(cnt, pos);
	ret = kernel_read(lower_filp, pages_buf, cnt * HMDFS_PAGE_SIZE, &pos);
	trace_hmdfs_do_readpages_cloud_end(cnt, pos, ret);

	if (ret >= 0) {
		/* zero the tail of the buffer beyond what was read */
		memset(pages_buf + ret, 0, cnt * HMDFS_PAGE_SIZE - ret);
		ret = 0;
	} else {
		goto out_err;
	}

	vunmap(pages_buf);
	for (idx = 0; idx < cnt; ++idx) {
		SetPageUptodate(vec[idx]);
		unlock_page(vec[idx]);
		/* drop the readahead reference, as the error path does */
		put_page(vec[idx]);
	}
	goto out;

out_err:
	if (pages_buf)
		vunmap(pages_buf);
	for (idx = 0; idx < cnt; ++idx) {
		ClearPageUptodate(vec[idx]);
		delete_from_page_cache(vec[idx]);
		unlock_page(vec[idx]);
		put_page(vec[idx]);
	}
out:
	return ret;
}

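/*
 * ->readpages() for cloud files: pull pages off the readahead list,
 * add them to the page cache, and hand each run of contiguous indexes
 * (at most sbi->s_readpages_nr pages) to hmdfs_do_readpages_cloud().
 */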
static int hmdfs_readpages_cloud(struct file *filp,
				 struct address_space *mapping,
				 struct list_head *pages,
				 unsigned int nr_pages)
{
	struct hmdfs_sb_info *sbi = hmdfs_sb(file_inode(filp)->i_sb);
	int ret = 0;
	unsigned int idx, cnt, limit;
	unsigned long next_index;
	gfp_t gfp = readahead_gfp_mask(mapping);
	struct page **vec = NULL;

	limit = sbi->s_readpages_nr;
	vec = kmalloc(limit * sizeof(*vec), GFP_KERNEL);
	if (!vec) {
		hmdfs_warning("cannot alloc vec (%u pages)", limit);
		return -ENOMEM;
	}

	cnt = 0;
	next_index = 0;
	for (idx = 0; idx < nr_pages; ++idx) {
		struct page *page = lru_to_page(pages);

		list_del(&page->lru);
		if (add_to_page_cache_lru(page, mapping, page->index, gfp)) {
			/* page is not locked when add_to_page_cache_lru() fails */
			put_page(page);
			continue;
		}

		if (cnt && (cnt >= limit || page->index != next_index)) {
			ret = hmdfs_do_readpages_cloud(filp, cnt, vec);
			cnt = 0;
			if (ret) {
				/* drop the locked page we just added */
				delete_from_page_cache(page);
				unlock_page(page);
				put_page(page);
				break;
			}
		}
		next_index = page->index + 1;
		vec[cnt++] = page;
	}

	if (cnt)
		ret = hmdfs_do_readpages_cloud(filp, cnt, vec);

	kfree(vec);
	trace_hmdfs_readpages_cloud(nr_pages, pages);
	return ret;
}

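/*
 * ->readpage() for cloud files: synchronously read a single page from
 * the lower file, zero-filling the tail past EOF.
 */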
static int hmdfs_readpage(struct file *file, struct page *page)
{
	loff_t offset = page_file_offset(page);
	int ret = -EACCES;
	char *page_buf;
	struct hmdfs_file_info *gfi = file->private_data;
	struct file *lower_file;

	if (gfi)
		lower_file = gfi->lower_file;
	else
		goto out;

	page_buf = kmap(page);
	if (!page_buf)
		goto out;
	ret = kernel_read(lower_file, page_buf, PAGE_SIZE, &offset);

	if (ret >= 0 && ret <= PAGE_SIZE) {
		/* zero the tail beyond what was read, then report success */
		memset(page_buf + ret, 0, PAGE_SIZE - ret);
		ret = 0;
	}

	kunmap(page);
	if (ret == 0)
		SetPageUptodate(page);
out:
	unlock_page(page);
	return ret;
}

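/*
 * Cloud files are read-only: all write-side hooks below are left NULL
 * on purpose.
 */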
const struct file_operations hmdfs_dev_file_fops_cloud = {
	.owner = THIS_MODULE,
	.llseek = generic_file_llseek,
	.read_iter = generic_file_read_iter,
	.write_iter = NULL,
	.mmap = hmdfs_file_mmap_cloud,
	.open = hmdfs_file_open_cloud,
	.release = hmdfs_file_release_cloud,
	.flush = hmdfs_file_flush_cloud,
	.fsync = NULL,
	.splice_read = NULL,
	.splice_write = NULL,
};

const struct address_space_operations hmdfs_dev_file_aops_cloud = {
	.readpage = hmdfs_readpage,
	.readpages = hmdfs_readpages_cloud,
	.write_begin = NULL,
	.write_end = NULL,
	.writepage = NULL,
	.set_page_dirty = NULL,
};

const struct address_space_operations hmdfs_aops_cloud = {
	.readpage = hmdfs_readpage,
	.readpages = hmdfs_readpages_cloud,
};

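/*
 * Walk the cached cloud dentry file group by group, starting from the
 * position packed into ctx->pos (device id, group id and in-group
 * offset), and emit every valid entry to the dir_context. Returns 1 if
 * the caller's buffer filled up, 0 when the whole file was consumed,
 * or a negative errno.
 */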
int analysis_dentry_file_from_cloud(struct hmdfs_sb_info *sbi,
				    struct file *file, struct file *handler,
				    struct dir_context *ctx)
{
	struct hmdfs_dentry_group_cloud *dentry_group = NULL;
	loff_t pos = ctx->pos;
	unsigned long dev_id = (unsigned long)((pos << 1) >> (POS_BIT_NUM - DEV_ID_BIT_NUM));
	unsigned long group_id = (unsigned long)((pos << (1 + DEV_ID_BIT_NUM)) >>
				 (POS_BIT_NUM - GROUP_ID_BIT_NUM));
	loff_t offset = pos & OFFSET_BIT_MASK;
	int group_num = 0;
	char *dentry_name = NULL;
	int iterate_result = 0;
	int i, j;

	dentry_group = kzalloc(sizeof(*dentry_group), GFP_KERNEL);
	if (!dentry_group)
		return -ENOMEM;

	if (IS_ERR_OR_NULL(handler)) {
		kfree(dentry_group);
		return -ENOENT;
	}

	group_num = get_dentry_group_cnt(file_inode(handler));
	dentry_name = kzalloc(DENTRY_NAME_MAX_LEN, GFP_KERNEL);
	if (!dentry_name) {
		kfree(dentry_group);
		return -ENOMEM;
	}

	for (i = group_id; i < group_num; i++) {
		int ret = hmdfs_metainfo_read_nocred(handler, dentry_group,
						     sizeof(struct hmdfs_dentry_group_cloud),
						     i);
		if (ret != sizeof(struct hmdfs_dentry_group_cloud)) {
			hmdfs_err("read dentry group failed ret:%d", ret);
			goto done;
		}

		for (j = offset; j < DENTRY_PER_GROUP_CLOUD; j++) {
			int len;
			int file_type = DT_UNKNOWN;
			bool is_continue;

			len = le16_to_cpu(dentry_group->nsl[j].namelen);
			if (!test_bit_le(j, dentry_group->bitmap) || len == 0)
				continue;

			memset(dentry_name, 0, DENTRY_NAME_MAX_LEN);
			if (S_ISDIR(le16_to_cpu(dentry_group->nsl[j].i_mode)))
				file_type = DT_DIR;
			else if (S_ISREG(le16_to_cpu(
					 dentry_group->nsl[j].i_mode)))
				file_type = DT_REG;

			strncat(dentry_name, dentry_group->filename[j], len);
			pos = hmdfs_set_pos(dev_id, i, j);
			is_continue =
				dir_emit(ctx, dentry_name, len,
					 pos + INUNUMBER_START, file_type);
			if (!is_continue) {
				ctx->pos = pos;
				iterate_result = 1;
				goto done;
			}
		}
		offset = 0;
	}

done:
	kfree(dentry_name);
	kfree(dentry_group);
	return iterate_result;
}

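/*
 * ->iterate() for cloud directories. ctx->pos == -1 marks a finished
 * scan; any error or full consumption of the dentry file ends it.
 */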
static int hmdfs_iterate_cloud(struct file *file, struct dir_context *ctx)
{
	int err = 0;
	loff_t start_pos = ctx->pos;

	if (ctx->pos == -1)
		return 0;
	err = analysis_dentry_file_from_cloud(
		file->f_inode->i_sb->s_fs_info, file, file->private_data, ctx);

	if (err <= 0)
		ctx->pos = -1;

	trace_hmdfs_iterate_remote(file->f_path.dentry, start_pos, ctx->pos,
				   err);
	return err;
}

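/*
 * Open a cloud directory by looking up its cached dentry file; the
 * cache file reference is stored in file->private_data for iteration.
 */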
int hmdfs_dir_open_cloud(struct inode *inode, struct file *file)
{
	struct clearcache_item *cache_item = NULL;

	get_cloud_cache_file(file->f_path.dentry, file->f_inode->i_sb->s_fs_info);
	cache_item = hmdfs_find_cache_item(CLOUD_DEVICE,
					   file->f_path.dentry);
	if (cache_item) {
		file->private_data = cache_item->filp;
		get_file(file->private_data);
		kref_put(&cache_item->ref, release_cache_item);
		return 0;
	}

	return -ENOENT;
}

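/*
 * Drop the cache file reference taken in hmdfs_dir_open_cloud().
 */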
static int hmdfs_dir_release_cloud(struct inode *inode, struct file *file)
{
	if (file->private_data)
		fput(file->private_data);
	file->private_data = NULL;
	return 0;
}

const struct file_operations hmdfs_dev_dir_ops_cloud = {
	.owner = THIS_MODULE,
	.iterate = hmdfs_iterate_cloud,
	.open = hmdfs_dir_open_cloud,
	.release = hmdfs_dir_release_cloud,
	.fsync = __generic_file_fsync,
};
421