// SPDX-License-Identifier: GPL-2.0
/*
 * fs/hmdfs/inode.c
 *
 * Copyright (c) 2020-2021 Huawei Device Co., Ltd.
 */

#include <linux/fs.h>
#include <linux/hash.h>
#include <linux/iversion.h>
#include <linux/mm.h>

#include "hmdfs_device_view.h"
#include "inode.h"
#include "comm/connection.h"

/**
 * Rules to generate inode numbers:
 *
 * "/", "/device_view", "/merge_view", "/device_view/local", "/device_view/cid"
 * = DOMAIN {3} : dev_id {29} : HMDFS_ROOT {32}
 *
 * "/device_view/cid/xxx"
 * = DOMAIN {3} : dev_id {29} : hash(remote_ino) {32}
 *
 * "/merge_view/xxx"
 * = DOMAIN {3} : lower's dev_id {29} : lower's ino_raw {32}
 */

#define BIT_WIDE_TOTAL 64

#define BIT_WIDE_DOMAIN 3
#define BIT_WIDE_DEVID 29
#define BIT_WIDE_INO_RAW 32

enum DOMAIN {
	DOMAIN_ROOT,
	DOMAIN_DEVICE_LOCAL,
	DOMAIN_DEVICE_REMOTE,
	DOMAIN_DEVICE_CLOUD,
	DOMAIN_MERGE_VIEW,
	DOMAIN_CLOUD_MERGE_VIEW,
	DOMAIN_INVALID,
};

union hmdfs_ino {
	const uint64_t ino_output;
	struct {
		uint64_t ino_raw : BIT_WIDE_INO_RAW;
		uint64_t dev_id : BIT_WIDE_DEVID;
		uint8_t domain : BIT_WIDE_DOMAIN;
	};
};
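
/*
 * For illustration only (not used by the code): following the rules above,
 * a remote inode on a peer with dev_id 5 whose remote ino hashes to
 * 0x1234abcd gets
 *
 *	ino.domain  = DOMAIN_DEVICE_REMOTE	(top 3 bits)
 *	ino.dev_id  = 5				(middle 29 bits)
 *	ino.ino_raw = 0x1234abcd		(low 32 bits)
 *
 * and ino.ino_output is then used as the VFS i_ino.
 */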

static uint8_t read_ino_domain(uint64_t ino)
{
	union hmdfs_ino _ino = {
		.ino_output = ino,
	};

	return _ino.domain;
}

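/*
 * Which fields identify an inode depends on its domain (see iget_test()):
 * merge-view and local inodes are matched by @lo_i, remote inodes by
 * @peer + @remote_ino, and cloud inodes by @cloud_record_id + @reserved.
 */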
struct iget_args {
	/* The lower inode of local/merge/root(part) inode */
	struct inode *lo_i;
	/* The peer of remote inode */
	struct hmdfs_peer *peer;
	/* The ino of remote inode */
	uint64_t remote_ino;

	/* The recordId of cloud inode */
	uint8_t *cloud_record_id;
	uint8_t *reserved;

	/* Returned inode's ino */
	union hmdfs_ino ino;
};

/**
 * iget_test - check whether the inode with the matching hashval is the one
 * we are looking for
 *
 * @inode: the local inode found in the inode cache with the matching hashval
 * @data: struct iget_args
 */
static int iget_test(struct inode *inode, void *data)
{
	struct hmdfs_inode_info *hii = hmdfs_i(inode);
	struct iget_args *ia = data;
	int res = 0;

	WARN_ON(ia->ino.domain < DOMAIN_ROOT ||
		ia->ino.domain >= DOMAIN_INVALID);

	if (read_ino_domain(inode->i_ino) == DOMAIN_ROOT)
		return 1;
	if (read_ino_domain(inode->i_ino) != ia->ino.domain)
		return 0;

	switch (ia->ino.domain) {
	case DOMAIN_MERGE_VIEW:
	case DOMAIN_CLOUD_MERGE_VIEW:
		res = (ia->lo_i == hii->lower_inode);
		break;
	case DOMAIN_DEVICE_LOCAL:
		res = (ia->lo_i == hii->lower_inode);
		break;
	case DOMAIN_DEVICE_REMOTE:
		res = (ia->peer == hii->conn &&
		       ia->remote_ino == hii->remote_ino);
		break;
	case DOMAIN_DEVICE_CLOUD:
		res = (ia->cloud_record_id &&
		       (memcmp(ia->cloud_record_id, hii->cloud_record_id,
			       CLOUD_RECORD_ID_LEN) == 0) &&
		       (ia->reserved[0] == hii->reserved[0]));
		break;
	}

	return res;
}

/**
 * iget_set - initialize an inode with iget_args
 *
 * @inode: the inode to be initialized
 * @data: struct iget_args
 */
static int iget_set(struct inode *inode, void *data)
{
	struct hmdfs_inode_info *hii = hmdfs_i(inode);
	struct iget_args *ia = (struct iget_args *)data;

	inode->i_ino = ia->ino.ino_output;
	inode_inc_iversion(inode);

	hii->conn = ia->peer;
	hii->remote_ino = ia->remote_ino;
	hii->lower_inode = ia->lo_i;

	if (ia->cloud_record_id) {
		memcpy(hii->cloud_record_id, ia->cloud_record_id, CLOUD_RECORD_ID_LEN);
		memcpy(hii->reserved, ia->reserved, CLOUD_DENTRY_RESERVED_LENGTH);
	}

	return 0;
}

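/*
 * Map a local (lower) ino to the 32-bit ino_raw field: an ino that already
 * fits in 32 bits is used as-is; larger inos are folded down by taking the
 * high 32 bits of a GOLDEN_RATIO_64 multiplication.
 */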
static uint64_t make_ino_raw_dev_local(uint64_t lo_ino)
{
	if (!(lo_ino >> BIT_WIDE_INO_RAW))
		return lo_ino;

	return lo_ino * GOLDEN_RATIO_64 >> BIT_WIDE_INO_RAW;
}

static uint64_t make_ino_raw_dev_remote(uint64_t remote_ino)
{
	return hash_long(remote_ino, BIT_WIDE_INO_RAW);
}

/**
 * hmdfs_iget5_locked_merge - obtain an inode for the merge-view
 *
 * @sb: superblock of current instance
 * @fst_lo_d: the lower dentry of its first comrade
 *
 * Simply reuse the lower's dev_id and raw ino and replace the domain to
 * form the new ino.
 */
struct inode *hmdfs_iget5_locked_merge(struct super_block *sb,
				       struct dentry *fst_lo_d)
{
	struct iget_args ia = {
		.lo_i = d_inode(fst_lo_d),
		.peer = NULL,
		.remote_ino = 0,
		.cloud_record_id = NULL,
		.ino.ino_output = 0,
	};

	if (unlikely(!d_inode(fst_lo_d))) {
		hmdfs_err("Received an invalid lower inode");
		return NULL;
	}
	if (unlikely(!hmdfs_d(fst_lo_d))) {
		hmdfs_err("Received an invalid fsdata");
		return NULL;
	}

	ia.ino.ino_raw = d_inode(fst_lo_d)->i_ino;
	ia.ino.dev_id = hmdfs_d(fst_lo_d)->device_id;
	ia.ino.domain = DOMAIN_MERGE_VIEW;
	return iget5_locked(sb, ia.ino.ino_output, iget_test, iget_set, &ia);
}

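/**
 * hmdfs_iget5_locked_cloud_merge - obtain an inode for the cloud-merge-view
 *
 * @sb: superblock of current instance
 * @fst_lo_d: the lower dentry of its first comrade
 *
 * Same scheme as hmdfs_iget5_locked_merge(), but the new ino is tagged with
 * the DOMAIN_CLOUD_MERGE_VIEW domain.
 */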
struct inode *hmdfs_iget5_locked_cloud_merge(struct super_block *sb,
					     struct dentry *fst_lo_d)
{
	struct iget_args ia = {
		.lo_i = d_inode(fst_lo_d),
		.peer = NULL,
		.remote_ino = 0,
		.cloud_record_id = NULL,
		.ino.ino_output = 0,
	};

	if (unlikely(!d_inode(fst_lo_d))) {
		hmdfs_err("Received an invalid lower inode");
		return NULL;
	}
	if (unlikely(!hmdfs_d(fst_lo_d))) {
		hmdfs_err("Received an invalid fsdata");
		return NULL;
	}

	ia.ino.ino_raw = d_inode(fst_lo_d)->i_ino;
	ia.ino.dev_id = hmdfs_d(fst_lo_d)->device_id;
	ia.ino.domain = DOMAIN_CLOUD_MERGE_VIEW;
	return iget5_locked(sb, ia.ino.ino_output, iget_test, iget_set, &ia);
}

/**
 * hmdfs_iget5_locked_local - obtain an inode for the local-dev-view
 *
 * @sb: superblock of current instance
 * @lo_i: the lower inode from the local filesystem
 *
 * Hash the local inode's ino to generate our ino. When collisions occur,
 * the address of the lower inode is also compared to ensure uniqueness.
 */
struct inode *hmdfs_iget5_locked_local(struct super_block *sb,
				       struct inode *lo_i)
{
	struct iget_args ia = {
		.lo_i = lo_i,
		.peer = NULL,
		.remote_ino = 0,
		.cloud_record_id = NULL,
		.ino.ino_output = 0,
	};

	if (unlikely(!lo_i)) {
		hmdfs_err("Received an invalid lower inode");
		return NULL;
	}
	ia.ino.ino_raw = make_ino_raw_dev_local(lo_i->i_ino);
	ia.ino.dev_id = 0;
	ia.ino.domain = DOMAIN_DEVICE_LOCAL;
	return iget5_locked(sb, ia.ino.ino_output, iget_test, iget_set, &ia);
}

/**
 * hmdfs_iget5_locked_remote - obtain an inode for the remote-dev-view
 *
 * @sb: superblock of current instance
 * @peer: corresponding device node
 * @remote_ino: remote inode's ino
 *
 * Hash the remote ino to fill the low 32 bits of the new ino.
 *
 * Note that the current implementation assumes that each remote inode has a
 * unique ino. Thus the combination of the peer's unique dev_id and the
 * remote_ino is enough to determine a unique remote inode.
 */
struct inode *hmdfs_iget5_locked_remote(struct super_block *sb,
					struct hmdfs_peer *peer,
					uint64_t remote_ino)
{
	struct iget_args ia = {
		.lo_i = NULL,
		.peer = peer,
		.remote_ino = remote_ino,
		.cloud_record_id = NULL,
		.ino.ino_output = 0,
	};

	if (unlikely(!peer)) {
		hmdfs_err("Received an invalid peer");
		return NULL;
	}

	ia.ino.ino_raw = make_ino_raw_dev_remote(remote_ino);
	ia.ino.dev_id = peer->device_id;
	ia.ino.domain = DOMAIN_DEVICE_REMOTE;
	return iget5_locked(sb, ia.ino.ino_output, iget_test, iget_set, &ia);
}

/**
 * hmdfs_iget5_locked_cloud - obtain an inode for the cloud-dev-view
 *
 * @sb: superblock of current instance
 * @peer: corresponding device node
 * @res: cloud lookup result carrying the file's record id and reserved bytes
 *
 * Hash the cloud record id, offset by the first reserved byte, to fill the
 * low 32 bits of the new ino.
 *
 * Note that the record id together with the first reserved byte is what
 * identifies a cloud inode (see iget_test()).
 */
struct inode *hmdfs_iget5_locked_cloud(struct super_block *sb,
				       struct hmdfs_peer *peer,
				       struct hmdfs_lookup_cloud_ret *res)
{
	struct iget_args ia = {
		.lo_i = NULL,
		.peer = peer,
		.remote_ino = 0,
		.cloud_record_id = res->record_id,
		.reserved = res->reserved,
		.ino.ino_output = 0,
	};

	if (unlikely(!peer)) {
		hmdfs_err("Received an invalid peer");
		return NULL;
	}

	ia.ino.ino_raw = make_ino_raw_cloud(res->record_id) + res->reserved[0];
	ia.ino.dev_id = peer->device_id;
	ia.ino.domain = DOMAIN_DEVICE_CLOUD;
	return iget5_locked(sb, ia.ino.ino_output, iget_test, iget_set, &ia);
}

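/**
 * hmdfs_iget_locked_root - obtain a root inode ("/", "/device_view",
 * "/merge_view", "/device_view/local" or "/device_view/cid")
 *
 * @sb: superblock of current instance
 * @root_ino: one of the predefined HMDFS_ROOT_* numbers
 * @lo_i: the lower inode backing this root, if any
 * @peer: the corresponding peer, required for the remote-device root
 *
 * The root_ino itself is used as ino_raw and tagged with DOMAIN_ROOT.
 */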
struct inode *hmdfs_iget_locked_root(struct super_block *sb, uint64_t root_ino,
				     struct inode *lo_i,
				     struct hmdfs_peer *peer)
{
	struct iget_args ia = {
		.lo_i = lo_i,
		.peer = peer,
		.remote_ino = 0,
		.cloud_record_id = NULL,
		.ino.ino_raw = root_ino,
		.ino.dev_id = peer ? peer->device_id : 0,
		.ino.domain = DOMAIN_ROOT,
	};

	if (unlikely(root_ino >= HMDFS_ROOT_INVALID)) {
		hmdfs_err("Root %llu is invalid", root_ino);
		return NULL;
	}
	if (unlikely(root_ino == HMDFS_ROOT_DEV_REMOTE && !peer)) {
		hmdfs_err("Root %llu received an invalid peer", root_ino);
		return NULL;
	}

	return iget5_locked(sb, ia.ino.ino_output, iget_test, iget_set, &ia);
}

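/*
 * Keep the upper file's size in sync with its lower file: when they differ,
 * adopt the lower size and drop the upper file's now-stale page cache.
 */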
void hmdfs_update_upper_file(struct file *upper_file, struct file *lower_file)
{
	loff_t upper_size = i_size_read(upper_file->f_inode);
	loff_t lower_size = i_size_read(lower_file->f_inode);

	if (upper_file->f_inode->i_mapping && upper_size != lower_size) {
		i_size_write(upper_file->f_inode, lower_size);
		truncate_inode_pages(upper_file->f_inode->i_mapping, 0);
	}
}