// SPDX-License-Identifier: GPL-2.0
/*
 * fs/hmdfs/inode_cloud.c
 *
 * Inode operations for the cloud layer of hmdfs: name lookup through the
 * on-disk cloud dentry cache file, inode instantiation/refresh, and the
 * (mostly write-denying) directory/file inode_operations tables.
 *
 * Copyright (c) 2023-2023 Huawei Device Co., Ltd.
 */

#include <linux/fs_stack.h>
#include <linux/namei.h>
#include <linux/xattr.h>
#include <linux/string.h>

#include "comm/socket_adapter.h"
#include "hmdfs.h"
#include "hmdfs_client.h"
#include "hmdfs_dentryfile.h"
#include "hmdfs_dentryfile_cloud.h"
#include "hmdfs_share.h"
#include "hmdfs_trace.h"
#include "authority/authentication.h"
#include "stash.h"

/*
 * make_ino_raw_cloud - derive a raw inode number from a cloud record id
 * @cloud_id: record identifier, CLOUD_RECORD_ID_LEN bytes
 *
 * Hashes the record id as a qstr via hmdfs_dentry_hash() so the same cloud
 * record always maps to the same 32-bit value.
 */
uint32_t make_ino_raw_cloud(uint8_t *cloud_id)
{
	struct qstr str;

	str.len = CLOUD_RECORD_ID_LEN;
	str.name = cloud_id;
	return hmdfs_dentry_hash(&str, CLOUD_RECORD_ID_LEN);
}

/*
 * lookup_cloud_dentry - find @qstr in the parent's cloud dentry cache file
 * @child_dentry: dentry being looked up (its d_parent owns the cache file)
 * @qstr: NUL-terminated name to search for
 * @dev_id: device id used to locate the cache item
 *
 * Ensures the parent's cloud cache file exists, then searches it for the
 * name and copies the matching on-disk record (little-endian) into a
 * freshly kmalloc()ed, CPU-byte-order hmdfs_lookup_cloud_ret.
 *
 * Returns the allocated result (caller must kfree()) or NULL if the cache
 * file is unavailable, allocation fails, or the name is not present.
 */
struct hmdfs_lookup_cloud_ret *lookup_cloud_dentry(struct dentry *child_dentry,
						   const struct qstr *qstr,
						   uint64_t dev_id)
{
	int err;
	struct hmdfs_lookup_cloud_ret *lookup_ret;
	struct hmdfs_dentry_cloud *dentry = NULL;
	struct clearcache_item *cache_item = NULL;
	struct hmdfs_dcache_lookup_ctx_cloud ctx;
	struct hmdfs_sb_info *sbi = hmdfs_sb(child_dentry->d_sb);

	/* Make sure the parent's cloud dentry cache file is available. */
	err = get_cloud_cache_file(child_dentry->d_parent, sbi);
	if (unlikely(err != 0))
		return NULL;
	/* Takes a reference on the cache item; dropped at "out". */
	cache_item = hmdfs_find_cache_item(dev_id, child_dentry->d_parent);
	if (!cache_item)
		return NULL;

	lookup_ret = kmalloc(sizeof(*lookup_ret), GFP_KERNEL);
	if (!lookup_ret)
		goto out;

	hmdfs_init_dcache_lookup_ctx_cloud(&ctx, sbi, qstr, cache_item->filp);
	dentry = hmdfs_find_dentry_cloud(child_dentry, &ctx);
	if (!dentry) {
		/* Name not found: return NULL, keep cache-item ref drop below. */
		kfree(lookup_ret);
		lookup_ret = NULL;
		goto out;
	}

	/* Convert the on-disk little-endian record to CPU byte order. */
	lookup_ret->i_mode = le16_to_cpu(dentry->i_mode);
	lookup_ret->i_size = le64_to_cpu(dentry->i_size);
	lookup_ret->i_mtime = le64_to_cpu(dentry->i_mtime);
	memcpy(lookup_ret->record_id, dentry->record_id, CLOUD_RECORD_ID_LEN);
	memcpy(lookup_ret->reserved, dentry->reserved, CLOUD_DENTRY_RESERVED_LENGTH);

	/*
	 * NOTE(review): the dentry group appears to be left locked by
	 * hmdfs_find_dentry_cloud() on success and is released here; on the
	 * not-found path ctx.page is not freed in this function, so the
	 * lookup helper presumably releases it itself — confirm against
	 * hmdfs_dentryfile_cloud.c.
	 */
	hmdfs_unlock_file(ctx.filp, get_dentry_group_pos(ctx.bidx),
			  DENTRYGROUP_SIZE);
	kfree(ctx.page);
out:
	kref_put(&cache_item->ref, release_cache_item);
	return lookup_ret;
}

/*
 * hmdfs_lookup_by_cloud - lookup wrapper taking the name from @dentry
 *
 * Copies the dentry name into a zeroed NAME_MAX + 1 buffer so the qstr
 * passed down is guaranteed NUL-terminated (callers have already bounded
 * d_name.len to NAME_MAX, see hmdfs_lookup_cloud()).
 *
 * Returns the kmalloc()ed lookup result (caller frees) or NULL. @flags is
 * currently unused.
 */
static struct hmdfs_lookup_cloud_ret *
hmdfs_lookup_by_cloud(struct dentry *dentry, unsigned int flags)
{
	struct hmdfs_lookup_cloud_ret *result = NULL;
	char *file_name = NULL;
	struct qstr qstr;
	int file_name_len = dentry->d_name.len;

	file_name = kzalloc(NAME_MAX + 1, GFP_KERNEL);
	if (!file_name)
		return NULL;
	strncpy(file_name, dentry->d_name.name, file_name_len);
	qstr.name = file_name;
	qstr.len = strlen(file_name);

	result = lookup_cloud_dentry(dentry, &qstr, CLOUD_DEVICE);

	kfree(file_name);
	return result;
}

/*
 * hmdfs_update_inode_size - update inode size when finding already existed
 * inode.
 *
 * First of all, if the file is opened for writing, we don't update inode size
 * here, because inode size is about to be changed after writing.
 *
 * If the file is not opened, simply update getattr_isize(not actual inode size,
 * just a value showed to user). This is safe because inode size will be
 * up-to-date after open.
 *
 * If the file is opened for read:
 * a. getattr_isize == HMDFS_STALE_REMOTE_ISIZE
 *   1) i_size == new_size, nothing need to be done.
 *   2) i_size > new_size, we keep the i_size and set getattr_isize to new_size,
 *      stale data might be read in this case, which is fine because file is
 *      opened before remote truncate the file.
 *   3) i_size < new_size, we drop the last page of the file if i_size is not
 *      aligned to PAGE_SIZE, clear getattr_isize, and update i_size to
 *      new_size.
 * b. getattr_isize != HMDFS_STALE_REMOTE_ISIZE, getattr_isize will only be set
 *    after 2).
 *   4) getattr_isize > i_size, this situation is impossible.
 *   5) i_size >= new_size, this case is the same as 2).
 *   6) i_size < new_size, this case is the same as 3).
 */
static void hmdfs_update_inode_size(struct inode *inode, uint64_t new_size)
{
	struct hmdfs_inode_info *info = hmdfs_i(inode);
	int writecount;
	uint64_t size;

	inode_lock(inode);
	size = info->getattr_isize;
	if (size == HMDFS_STALE_REMOTE_ISIZE)
		size = i_size_read(inode);
	if (size == new_size) {
		inode_unlock(inode);
		return;
	}

	writecount = atomic_read(&inode->i_writecount);
	/* check if writing is in progress */
	if (writecount > 0) {
		info->getattr_isize = HMDFS_STALE_REMOTE_ISIZE;
		inode_unlock(inode);
		return;
	}

	/* check if there is no one who opens the file */
	if (kref_read(&info->ref) == 0)
		goto update_info;

	/* check if there is someone who opens the file for read */
	if (writecount == 0) {
		uint64_t aligned_size;

		/* use inode size here instead of getattr_isize */
		size = i_size_read(inode);
		if (new_size <= size)
			goto update_info;
		/*
		 * if the old inode size is not aligned to HMDFS_PAGE_SIZE, we
		 * need to drop the last page of the inode, otherwise zero will
		 * be returned while reading the new range in the page after
		 * changing inode size.
		 */
		aligned_size = round_down(size, HMDFS_PAGE_SIZE);
		if (aligned_size != size)
			truncate_inode_pages(inode->i_mapping, aligned_size);
		i_size_write(inode, new_size);
		info->getattr_isize = HMDFS_STALE_REMOTE_ISIZE;
		inode_unlock(inode);
		return;
	}

update_info:
	info->getattr_isize = new_size;
	inode_unlock(inode);
}

/*
 * hmdfs_update_inode - refresh a cached inode from a fresh lookup result
 *
 * Applies the remote mtime and (for regular files) the remote size from
 * @lookup_result to an inode that was found already present in the icache.
 */
static void hmdfs_update_inode(struct inode *inode,
			       struct hmdfs_lookup_cloud_ret *lookup_result)
{
	struct hmdfs_time_t remote_mtime = {
		.tv_sec = lookup_result->i_mtime,
		.tv_nsec = 0,
	};

	/*
	 * We only update mtime if the file is not opened for writing. If we do
	 * update it before writing is about to start, user might see the mtime
	 * up-and-down if system time in server and client do not match. However
	 * mtime in client will eventually match server after timeout without
	 * writing.
	 */
	if (!inode_is_open_for_write(inode))
		inode->i_mtime = remote_mtime;

	/*
	 * We don't care i_size of dir, and lock inode for dir
	 * might cause deadlock.
	 */
	if (S_ISREG(inode->i_mode))
		hmdfs_update_inode_size(inode, lookup_result->i_size);
}

/*
 * hmdfs_fill_inode_permission - inherit ownership from the parent directory
 *
 * Only active under CONFIG_HMDFS_FS_PERMISSION; otherwise a no-op.
 * @mode is accepted but not used here.
 */
static void hmdfs_fill_inode_permission(struct inode *inode, struct inode *dir,
					umode_t mode)
{
#ifdef CONFIG_HMDFS_FS_PERMISSION
	inode->i_uid = dir->i_uid;
	inode->i_gid = dir->i_gid;
#endif
}

/*
 * Pseudo-peer standing in for the cloud "device". Only ->device_id is
 * assigned (to CLOUD_DEVICE, in fill_inode_cloud()) before the struct is
 * used as a key by hmdfs_iget5_locked_cloud().
 * NOTE(review): file-scope, non-static and mutable; the stored value is
 * constant so concurrent lookups appear benign — confirm no other writers.
 */
struct hmdfs_peer peer;

/*
 * fill_inode_cloud - get or create the inode for a cloud lookup result
 * @sb: hmdfs superblock
 * @res: lookup result (mode/size/mtime/record id) from the dentry cache
 * @dir: parent directory inode, used for permission inheritance
 *
 * Looks the inode up in the icache via hmdfs_iget5_locked_cloud(). A cached
 * (non-I_NEW) inode is refreshed in place; a new one is initialized with
 * fixed cloud ownership/modes and the cloud file/dir operation tables.
 *
 * Returns the inode, or ERR_PTR(-ENOMEM)/ERR_PTR(-EIO) on failure (only
 * regular files and directories are accepted).
 */
struct inode *fill_inode_cloud(struct super_block *sb, struct hmdfs_lookup_cloud_ret *res, struct inode *dir)
{
	int ret = 0;
	struct inode *inode = NULL;
	struct hmdfs_inode_info *info;
	umode_t mode = res->i_mode;
	peer.device_id = CLOUD_DEVICE;

	inode = hmdfs_iget5_locked_cloud(sb, &peer, res);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	info = hmdfs_i(inode);
	info->inode_type = HMDFS_LAYER_OTHER_CLOUD;
	/* the inode was found in cache */
	if (!(inode->i_state & I_NEW)) {
		hmdfs_fill_inode_permission(inode, dir, mode);
		hmdfs_update_inode(inode, res);
		return inode;
	}

	/* Brand-new inode: ctime unknown, mtime taken from the cloud record. */
	inode->i_ctime.tv_sec = 0;
	inode->i_ctime.tv_nsec = 0;
	inode->i_mtime.tv_sec = res->i_mtime;
	inode->i_mtime.tv_nsec = 0;

	inode->i_uid = USER_DATA_RW_UID;
	inode->i_gid = USER_DATA_RW_GID;

	/* Cloud objects get fixed modes; anything but REG/DIR is rejected. */
	if (S_ISDIR(mode))
		inode->i_mode = S_IFDIR | S_IRWXU | S_IRWXG | S_IXOTH;
	else if (S_ISREG(mode))
		inode->i_mode = S_IFREG | S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP;
	else {
		ret = -EIO;
		goto bad_inode;
	}

	if (S_ISREG(mode)) {
		inode->i_op = &hmdfs_dev_file_iops_cloud;
		inode->i_fop = &hmdfs_dev_file_fops_cloud;
		inode->i_size = res->i_size;
		set_nlink(inode, 1);
	} else if (S_ISDIR(mode)) {
		inode->i_op = &hmdfs_dev_dir_inode_ops_cloud;
		inode->i_fop = &hmdfs_dev_dir_ops_cloud;
		set_nlink(inode, 2);
	} else {
		/* Unreachable after the mode check above; kept for safety. */
		ret = -EIO;
		goto bad_inode;
	}

	inode->i_mapping->a_ops = &hmdfs_dev_file_aops_cloud;

	hmdfs_fill_inode_permission(inode, dir, mode);
	unlock_new_inode(inode);
	return inode;
bad_inode:
	/* Drops the I_NEW inode and wakes any waiters. */
	iget_failed(inode);
	return ERR_PTR(ret);
}

/*
 * hmdfs_lookup_cloud_dentry - core of ->lookup for the cloud layer
 *
 * Performs the dentry-cache lookup, builds/refreshes the inode, fixes up
 * ownership, and splices the result into the dcache.
 *
 * Returns the d_splice_alias() result (possibly NULL on plain success),
 * an ERR_PTR from inode creation, or ERR_PTR(-ENOENT) if the name is not
 * in the cloud cache.
 */
static struct dentry *hmdfs_lookup_cloud_dentry(struct inode *parent_inode,
						struct dentry *child_dentry,
						int flags)
{
	struct dentry *ret = NULL;
	struct inode *inode = NULL;
	struct super_block *sb = parent_inode->i_sb;
	struct hmdfs_lookup_cloud_ret *lookup_result = NULL;
	struct hmdfs_dentry_info *gdi = hmdfs_d(child_dentry);

	lookup_result = hmdfs_lookup_by_cloud(child_dentry, flags);
	if (lookup_result != NULL) {
		if (in_share_dir(child_dentry))
			gdi->file_type = HM_SHARE;
		inode = fill_inode_cloud(sb, lookup_result, parent_inode);
		if (IS_ERR(inode)) {
			ret = ERR_CAST(inode);
			goto out;
		}

		check_and_fixup_ownership_remote(parent_inode,
						 inode,
						 child_dentry);
		ret = d_splice_alias(inode, child_dentry);
		if (!IS_ERR_OR_NULL(ret))
			child_dentry = ret;
	} else {
		ret = ERR_PTR(-ENOENT);
	}

out:
	kfree(lookup_result);
	return ret;
}

/*
 * hmdfs_lookup_cloud - ->lookup for cloud directories
 *
 * Validates the name length, initializes the hmdfs dentry info (inheriting
 * the parent's device_id), and delegates to hmdfs_lookup_cloud_dentry().
 * A -ENOENT result is converted to NULL so the VFS can go on to create the
 * file. On success the dentry timestamp is refreshed for revalidation.
 */
struct dentry *hmdfs_lookup_cloud(struct inode *parent_inode,
				  struct dentry *child_dentry,
				  unsigned int flags)
{
	int err = 0;
	struct dentry *ret = NULL;
	struct hmdfs_dentry_info *gdi = NULL;
	struct hmdfs_sb_info *sbi = hmdfs_sb(child_dentry->d_sb);

	trace_hmdfs_lookup_remote(parent_inode, child_dentry, flags);
	if (child_dentry->d_name.len > NAME_MAX) {
		err = -ENAMETOOLONG;
		ret = ERR_PTR(-ENAMETOOLONG);
		goto out;
	}

	err = init_hmdfs_dentry_info(sbi, child_dentry,
				     HMDFS_LAYER_OTHER_CLOUD);
	if (err) {
		ret = ERR_PTR(err);
		goto out;
	}
	gdi = hmdfs_d(child_dentry);
	gdi->device_id = hmdfs_d(child_dentry->d_parent)->device_id;

	ret = hmdfs_lookup_cloud_dentry(parent_inode, child_dentry, flags);
	/*
	 * don't return error if inode do not exist, so that vfs can continue
	 * to create it.
	 */
	if (IS_ERR_OR_NULL(ret)) {
		err = PTR_ERR(ret);
		if (err == -ENOENT)
			ret = NULL;
	} else {
		child_dentry = ret;
	}

out:
	if (!err)
		hmdfs_set_time(child_dentry, jiffies);
	trace_hmdfs_lookup_remote_end(parent_inode, child_dentry, err);
	return ret;
}

/* Cloud directories are read-only: directory creation is denied. */
int hmdfs_mkdir_cloud(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	return -EPERM;
}

/* Cloud directories are read-only: file creation is denied. */
int hmdfs_create_cloud(struct inode *dir, struct dentry *dentry, umode_t mode,
		       bool want_excl)
{
	return -EPERM;
}

/* Cloud directories are read-only: directory removal is denied. */
int hmdfs_rmdir_cloud(struct inode *dir, struct dentry *dentry)
{
	return -EPERM;
}

/*
 * Unlink reports success without doing anything.
 * NOTE(review): deliberately returns 0 (not -EPERM like the siblings);
 * presumably the actual removal happens elsewhere — confirm intent.
 */
int hmdfs_unlink_cloud(struct inode *dir, struct dentry *dentry)
{
	return 0;
}

/* Cloud directories are read-only: rename is denied. */
int hmdfs_rename_cloud(struct inode *old_dir, struct dentry *old_dentry,
		       struct inode *new_dir, struct dentry *new_dentry,
		       unsigned int flags)
{
	return -EPERM;
}

/* Directory setattr is silently accepted but ignored. */
static int hmdfs_dir_setattr_cloud(struct dentry *dentry, struct iattr *ia)
{
	// Do not support dir setattr
	return 0;
}

/* inode_operations for cloud directories. */
const struct inode_operations hmdfs_dev_dir_inode_ops_cloud = {
	.lookup = hmdfs_lookup_cloud,
	.mkdir = hmdfs_mkdir_cloud,
	.create = hmdfs_create_cloud,
	.rmdir = hmdfs_rmdir_cloud,
	.unlink = hmdfs_unlink_cloud,
	.rename = hmdfs_rename_cloud,
	.setattr = hmdfs_dir_setattr_cloud,
	.permission = hmdfs_permission,
};

/*
 * hmdfs_setattr_cloud - ->setattr for cloud regular files
 *
 * Refuses with -EAGAIN while the inode is being stashed. Applies size
 * changes locally (truncate_setsize) after inode_newsize_ok(), marking
 * getattr_isize stale, and applies ATTR_MTIME directly to the inode.
 */
static int hmdfs_setattr_cloud(struct dentry *dentry, struct iattr *ia)
{
	struct hmdfs_inode_info *info = hmdfs_i(d_inode(dentry));
	struct inode *inode = d_inode(dentry);
	int err = 0;

	if (hmdfs_inode_is_stashing(info))
		return -EAGAIN;

	if (ia->ia_valid & ATTR_SIZE) {
		err = inode_newsize_ok(inode, ia->ia_size);
		if (err)
			return err;
		truncate_setsize(inode, ia->ia_size);
		info->getattr_isize = HMDFS_STALE_REMOTE_ISIZE;
	}
	if (ia->ia_valid & ATTR_MTIME)
		inode->i_mtime = ia->ia_mtime;

	return err;
}


/*
 * hmdfs_get_cached_attr_cloud - ->getattr for cloud files
 *
 * Fills @stat from cached inode fields only (no remote round trip).
 * Reports getattr_isize as the size unless it is stale, in which case the
 * real i_size is used. Always returns 0.
 */
static int hmdfs_get_cached_attr_cloud(const struct path *path,
				       struct kstat *stat, u32 request_mask,
				       unsigned int flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct hmdfs_inode_info *info = hmdfs_i(inode);
	uint64_t size = info->getattr_isize;

	stat->ino = inode->i_ino;
	stat->mtime = inode->i_mtime;
	stat->mode = inode->i_mode;
	stat->uid.val = inode->i_uid.val;
	stat->gid.val = inode->i_gid.val;
	if (size == HMDFS_STALE_REMOTE_ISIZE)
		size = i_size_read(inode);

	stat->size = size;
	return 0;
}

/* inode_operations for cloud regular files (xattrs not supported). */
const struct inode_operations hmdfs_dev_file_iops_cloud = {
	.setattr = hmdfs_setattr_cloud,
	.permission = hmdfs_permission,
	.getattr = hmdfs_get_cached_attr_cloud,
	.listxattr = NULL,
};