// SPDX-License-Identifier: GPL-2.0
/*
 * fs/hmdfs/file_cloud.c
 *
 * Copyright (c) 2023-2023 Huawei Device Co., Ltd.
 */

#include <linux/backing-dev.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/page-flags.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include "file_remote.h"

#include "comm/socket_adapter.h"
#include "hmdfs.h"
#include "hmdfs_client.h"
#include "hmdfs_dentryfile.h"
#include "hmdfs_dentryfile_cloud.h"
#include "hmdfs_trace.h"

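/*
 * VM operations for mmap'ed cloud files: faults are served from the page
 * cache by the generic filemap helpers. page_mkwrite is left NULL; cloud
 * files are exposed read-only here (all write ops below are NULL as well).
 */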
static const struct vm_operations_struct hmdfs_cloud_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = NULL,
};

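/*
 * Open the backing file for a cloud inode: resolve this dentry's path
 * relative to sbi->cloud_dir, open it with O_DIRECT forced on, and stash
 * the lower file in file->private_data.
 */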
int hmdfs_file_open_cloud(struct inode *inode, struct file *file)
{
	const char *dir_path;
	struct hmdfs_sb_info *sbi = inode->i_sb->s_fs_info;
	struct path root_path;
	struct file *lower_file;
	int err = 0;

	struct hmdfs_file_info *gfi = kzalloc(sizeof(*gfi), GFP_KERNEL);
	if (!gfi)
		return -ENOMEM;

	if (!sbi->cloud_dir) {
		hmdfs_info("no cloud_dir");
		kfree(gfi);
		return -EPERM;
	}

	err = kern_path(sbi->cloud_dir, 0, &root_path);
	if (err) {
		hmdfs_info("kern_path failed: %d", err);
		kfree(gfi);
		return err;
	}

	dir_path = hmdfs_get_dentry_relative_path(file->f_path.dentry);
	if (!dir_path) {
		hmdfs_err("get cloud path failed");
		path_put(&root_path);
		kfree(gfi);
		return -ENOENT;
	}

	lower_file = file_open_root(&root_path, dir_path,
				    file->f_flags | O_DIRECT, file->f_mode);
	path_put(&root_path);
	if (IS_ERR(lower_file)) {
		hmdfs_info("file_open_root failed: %ld", PTR_ERR(lower_file));
		err = PTR_ERR(lower_file);
		kfree(gfi);
	} else {
		gfi->lower_file = lower_file;
		file->private_data = gfi;
	}
	kfree(dir_path);
	return err;
}

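/*
 * Release the per-file info, dropping the reference on the backing cloud
 * file taken at open time.
 */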
int hmdfs_file_release_cloud(struct inode *inode, struct file *file)
{
	struct hmdfs_file_info *gfi = hmdfs_f(file);

	file->private_data = NULL;
	fput(gfi->lower_file);
	kfree(gfi);
	return 0;
}

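/*
 * Delegate ->flush to the backing cloud file, if it implements one.
 */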
static int hmdfs_file_flush_cloud(struct file *file, fl_owner_t id)
{
	struct hmdfs_file_info *gfi = hmdfs_f(file);

	if (!gfi || !gfi->lower_file)
		return 0;

	if (gfi->lower_file->f_op->flush)
		return gfi->lower_file->f_op->flush(gfi->lower_file, id);
	return 0;
}

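/*
 * mmap a cloud file by delegating to the backing file: swap vma->vm_file
 * over to the lower file and call its ->mmap, the same stacking scheme
 * overlayfs uses.
 */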
int hmdfs_file_mmap_cloud(struct file *file, struct vm_area_struct *vma)
{
	struct hmdfs_file_info *private_data = file->private_data;
	struct file *realfile = NULL;
	int ret;

	if (!private_data)
		return -EINVAL;

	realfile = private_data->lower_file;
	if (!realfile)
		return -EINVAL;

	if (!realfile->f_op->mmap)
		return -ENODEV;

	if (WARN_ON(file != vma->vm_file))
		return -EIO;

	vma->vm_file = get_file(realfile);
	ret = call_mmap(vma->vm_file, vma);
	if (ret)
		/* Undo the reference we just took on the lower file. */
		fput(realfile);
	else
		/* The vma now pins realfile instead of the hmdfs file. */
		fput(file);

	file_accessed(file);

	return ret;
}

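/*
 * Read @cnt locked page-cache pages, contiguous from vec[0]->index, in one
 * shot: vmap() them into a single kernel buffer and issue one kernel_read()
 * on the backing file. On success the pages are marked uptodate and
 * unlocked; on failure they are dropped from the page cache.
 */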
static int hmdfs_do_readpages_cloud(struct file *filp, int cnt,
				    struct page **vec)
{
	struct hmdfs_file_info *gfi = filp->private_data;
	struct file *lower_filp;
	loff_t pos = (loff_t)(vec[0]->index) << HMDFS_PAGE_OFFSET;
	void *pages_buf = NULL;
	int idx, ret;

	if (!gfi) {
		ret = -EINVAL;
		goto out_err;
	}
	lower_filp = gfi->lower_file;

	pages_buf = vmap(vec, cnt, VM_MAP, PAGE_KERNEL);
	if (!pages_buf) {
		ret = -ENOMEM;
		goto out_err;
	}

	trace_hmdfs_do_readpages_cloud_begin(cnt, pos);
	ret = kernel_read(lower_filp, pages_buf, cnt * HMDFS_PAGE_SIZE, &pos);
	trace_hmdfs_do_readpages_cloud_end(cnt, pos, ret);

	if (ret < 0)
		goto out_err;
	/* Zero the tail of the batch that lies beyond EOF. */
	memset(pages_buf + ret, 0, cnt * HMDFS_PAGE_SIZE - ret);

	vunmap(pages_buf);
	for (idx = 0; idx < cnt; ++idx) {
		SetPageUptodate(vec[idx]);
		unlock_page(vec[idx]);
	}
	ret = 0;
	goto out;

out_err:
	if (pages_buf)
		vunmap(pages_buf);
	for (idx = 0; idx < cnt; ++idx) {
		struct folio *folio = page_folio(vec[idx]);

		folio_clear_uptodate(folio);
		filemap_remove_folio(folio);
		unlock_page(vec[idx]);
		put_page(vec[idx]);
	}
out:
	return ret;
}

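/*
 * ->readahead: pull pages off the readahead batch, group runs of contiguous
 * indices (at most sbi->s_readpages_nr pages per run), and hand each run to
 * hmdfs_do_readpages_cloud().
 */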
static void hmdfs_readahead(struct readahead_control *ractl)
{
	struct file *filp = ractl->file;
	struct address_space *mapping = ractl->mapping;
	unsigned int nr_pages = readahead_count(ractl);
	struct hmdfs_sb_info *sbi = hmdfs_sb(file_inode(filp)->i_sb);
	unsigned int idx, cnt, limit;
	unsigned long next_index;
	gfp_t gfp = readahead_gfp_mask(mapping);
	struct page **vec = NULL;
	int ret = 0;

	limit = sbi->s_readpages_nr;
	vec = kmalloc_array(limit, sizeof(*vec), GFP_KERNEL);
	if (!vec) {
		hmdfs_warning("cannot alloc vec (%u pages)", limit);
		return;
	}

	cnt = 0;
	next_index = 0;
	for (idx = 0; idx < nr_pages; ++idx) {
		struct page *page = readahead_page(ractl);

		if (add_to_page_cache_lru(page, mapping, page->index, gfp)) {
			unlock_page(page);
			put_page(page);
			continue;
		}

		/* Flush the batch once it is full or no longer contiguous. */
		if (cnt && (cnt >= limit || page->index != next_index)) {
			ret = hmdfs_do_readpages_cloud(filp, cnt, vec);
			cnt = 0;
			if (ret) {
				unlock_page(page);
				put_page(page);
				break;
			}
		}
		next_index = page->index + 1;
		vec[cnt++] = page;
	}

	if (cnt)
		ret = hmdfs_do_readpages_cloud(filp, cnt, vec);

	kfree(vec);
	trace_hmdfs_readpages_cloud(nr_pages, ret);
}

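/*
 * Read a single page synchronously from the backing cloud file,
 * zero-filling whatever lies beyond EOF.
 */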
static int hmdfs_readpage(struct file *file, struct page *page)
{
	loff_t offset = page_file_offset(page);
	int ret = -EACCES;
	char *page_buf;
	struct hmdfs_file_info *gfi = file->private_data;
	struct file *lower_file;

	if (gfi)
		lower_file = gfi->lower_file;
	else
		goto out;

	page_buf = kmap(page);
	if (!page_buf)
		goto out;
	ret = kernel_read(lower_file, page_buf, PAGE_SIZE, &offset);

	if (ret >= 0 && ret <= PAGE_SIZE) {
		/* Zero the tail past what was read, then report success. */
		memset(page_buf + ret, 0, PAGE_SIZE - ret);
		ret = 0;
	}

	kunmap(page);
	if (ret == 0)
		SetPageUptodate(page);
out:
	unlock_page(page);
	return ret;
}

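/*
 * ->read_folio wrapper: hmdfs still operates on struct page internally, so
 * unwrap the folio and reuse hmdfs_readpage().
 */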
static int hmdfs_read_folio(struct file *file, struct folio *folio)
{
	return hmdfs_readpage(file, &folio->page);
}

const struct file_operations hmdfs_dev_file_fops_cloud = {
	.owner = THIS_MODULE,
	.llseek = generic_file_llseek,
	.read_iter = generic_file_read_iter,
	.write_iter = NULL,
	.mmap = hmdfs_file_mmap_cloud,
	.open = hmdfs_file_open_cloud,
	.release = hmdfs_file_release_cloud,
	.flush = hmdfs_file_flush_cloud,
	.fsync = NULL,
	.splice_read = NULL,
	.splice_write = NULL,
};

const struct address_space_operations hmdfs_dev_file_aops_cloud = {
	.read_folio = hmdfs_read_folio,
	.readahead = hmdfs_readahead,
	.write_begin = NULL,
	.write_end = NULL,
	.writepage = NULL,
	.dirty_folio = NULL,
};

const struct address_space_operations hmdfs_aops_cloud = {
	.read_folio = hmdfs_read_folio,
	.readahead = hmdfs_readahead,
};

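/*
 * Emit directory entries from the cached cloud dentry file. ctx->pos packs
 * the device id, dentry group id and in-group slot into a single loff_t
 * (the top bit is skipped), roughly:
 *
 *   | 1 bit | DEV_ID_BIT_NUM | GROUP_ID_BIT_NUM | in-group offset |
 *
 * so iteration can resume exactly where a previous dir_emit() stopped.
 */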
int analysis_dentry_file_from_cloud(struct hmdfs_sb_info *sbi,
				    struct file *file, struct file *handler,
				    struct dir_context *ctx)
{
	struct hmdfs_dentry_group_cloud *dentry_group = NULL;
	loff_t pos = ctx->pos;
	unsigned long dev_id =
		(unsigned long)((pos << 1) >> (POS_BIT_NUM - DEV_ID_BIT_NUM));
	unsigned long group_id = (unsigned long)((pos << (1 + DEV_ID_BIT_NUM)) >>
				 (POS_BIT_NUM - GROUP_ID_BIT_NUM));
	loff_t offset = pos & OFFSET_BIT_MASK;
	int group_num = 0;
	char *dentry_name = NULL;
	int iterate_result = 0;
	int i, j;

	dentry_group = kzalloc(sizeof(*dentry_group), GFP_KERNEL);
	if (!dentry_group)
		return -ENOMEM;

	if (IS_ERR_OR_NULL(handler)) {
		kfree(dentry_group);
		return -ENOENT;
	}

	group_num = get_dentry_group_cnt(file_inode(handler));
	dentry_name = kzalloc(DENTRY_NAME_MAX_LEN, GFP_KERNEL);
	if (!dentry_name) {
		kfree(dentry_group);
		return -ENOMEM;
	}

	for (i = group_id; i < group_num; i++) {
		int ret = hmdfs_metainfo_read_nocred(handler, dentry_group,
					sizeof(struct hmdfs_dentry_group_cloud),
					i);

		if (ret != sizeof(struct hmdfs_dentry_group_cloud)) {
			hmdfs_err("read dentry group failed ret:%d", ret);
			goto done;
		}

		for (j = offset; j < DENTRY_PER_GROUP_CLOUD; j++) {
			int len;
			int file_type = DT_UNKNOWN;
			bool is_continue;

			len = le16_to_cpu(dentry_group->nsl[j].namelen);
			if (!test_bit_le(j, dentry_group->bitmap) || len == 0)
				continue;

			memset(dentry_name, 0, DENTRY_NAME_MAX_LEN);
			if (S_ISDIR(le16_to_cpu(dentry_group->nsl[j].i_mode)))
				file_type = DT_DIR;
			else if (S_ISREG(le16_to_cpu(
					 dentry_group->nsl[j].i_mode)))
				file_type = DT_REG;

			strncat(dentry_name, dentry_group->filename[j], len);
			pos = hmdfs_set_pos(dev_id, i, j);
			is_continue =
				dir_emit(ctx, dentry_name, len,
					 pos + INUNUMBER_START, file_type);
			if (!is_continue) {
				ctx->pos = pos;
				iterate_result = 1;
				goto done;
			}
		}
		offset = 0;
	}

done:
	kfree(dentry_name);
	kfree(dentry_group);
	return iterate_result;
}

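/*
 * ->iterate_shared for cloud directories. ctx->pos == -1 marks an exhausted
 * stream. The parser above returns 1 when dir_emit() ran out of buffer
 * space, leaving ctx->pos where iteration should resume; 0 or an error
 * ends the stream.
 */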
static int hmdfs_iterate_cloud(struct file *file, struct dir_context *ctx)
{
	int err = 0;
	loff_t start_pos = ctx->pos;

	if (ctx->pos == -1)
		return 0;
	err = analysis_dentry_file_from_cloud(
		file->f_inode->i_sb->s_fs_info, file, file->private_data, ctx);

	if (err <= 0)
		ctx->pos = -1;

	trace_hmdfs_iterate_remote(file->f_path.dentry, start_pos, ctx->pos,
				   err);
	return err;
}

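/*
 * Opening a cloud directory pins the cached dentry file (fetched via
 * get_cloud_cache_file()) in file->private_data for later iteration.
 */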
int hmdfs_dir_open_cloud(struct inode *inode, struct file *file)
{
	struct clearcache_item *cache_item = NULL;

	get_cloud_cache_file(file->f_path.dentry, file->f_inode->i_sb->s_fs_info);
	cache_item = hmdfs_find_cache_item(CLOUD_DEVICE,
					   file->f_path.dentry);
	if (cache_item) {
		file->private_data = cache_item->filp;
		get_file(file->private_data);
		kref_put(&cache_item->ref, release_cache_item);
		return 0;
	}

	return -ENOENT;
}

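/*
 * Drop the reference on the cached dentry file taken in
 * hmdfs_dir_open_cloud().
 */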
static int hmdfs_dir_release_cloud(struct inode *inode, struct file *file)
{
	if (file->private_data)
		fput(file->private_data);
	file->private_data = NULL;
	return 0;
}

const struct file_operations hmdfs_dev_dir_ops_cloud = {
	.owner = THIS_MODULE,
	.iterate_shared = hmdfs_iterate_cloud,
	.open = hmdfs_dir_open_cloud,
	.release = hmdfs_dir_release_cloud,
	.fsync = __generic_file_fsync,
};