// SPDX-License-Identifier: GPL-2.0
/*
 * fs/hmdfs/inode_cloud.c
 *
 * Copyright (c) 2023-2023 Huawei Device Co., Ltd.
 */

#include <linux/fs_stack.h>
#include <linux/namei.h>
#include <linux/xattr.h>
#include <linux/string.h>

#include "comm/socket_adapter.h"
#include "hmdfs.h"
#include "hmdfs_client.h"
#include "hmdfs_dentryfile.h"
#include "hmdfs_dentryfile_cloud.h"
#include "hmdfs_share.h"
#include "hmdfs_trace.h"
#include "authority/authentication.h"
#include "stash.h"

uint32_t make_ino_raw_cloud(uint8_t *cloud_id)
{
	struct qstr str;

	str.len = CLOUD_RECORD_ID_LEN;
	str.name = cloud_id;
	return hmdfs_dentry_hash(&str, CLOUD_RECORD_ID_LEN);
}

struct hmdfs_lookup_cloud_ret *lookup_cloud_dentry(struct dentry *child_dentry,
						   const struct qstr *qstr,
						   uint64_t dev_id)
{
	struct hmdfs_lookup_cloud_ret *lookup_ret;
	struct hmdfs_dentry_cloud *dentry = NULL;
	struct clearcache_item *cache_item = NULL;
	struct hmdfs_dcache_lookup_ctx_cloud ctx;
	struct hmdfs_sb_info *sbi = hmdfs_sb(child_dentry->d_sb);

	get_cloud_cache_file(child_dentry->d_parent, sbi);
	cache_item = hmdfs_find_cache_item(dev_id, child_dentry->d_parent);
	if (!cache_item)
		return NULL;

	lookup_ret = kmalloc(sizeof(*lookup_ret), GFP_KERNEL);
	if (!lookup_ret)
		goto out;

	hmdfs_init_dcache_lookup_ctx_cloud(&ctx, sbi, qstr, cache_item->filp);
	dentry = hmdfs_find_dentry_cloud(child_dentry, &ctx);
	if (!dentry) {
		kfree(lookup_ret);
		lookup_ret = NULL;
		goto out;
	}

	lookup_ret->i_mode = le16_to_cpu(dentry->i_mode);
	lookup_ret->i_size = le64_to_cpu(dentry->i_size);
	lookup_ret->i_mtime = le64_to_cpu(dentry->i_mtime);
	memcpy(lookup_ret->record_id, dentry->record_id, CLOUD_RECORD_ID_LEN);
	memcpy(lookup_ret->reserved, dentry->reserved,
	       CLOUD_DENTRY_RESERVED_LENGTH);

	hmdfs_unlock_file(ctx.filp, get_dentry_group_pos(ctx.bidx),
			  DENTRYGROUP_SIZE);
	kfree(ctx.page);
out:
	kref_put(&cache_item->ref, release_cache_item);
	return lookup_ret;
}
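
/*
 * Copy the dentry name into a NUL-terminated buffer and look it up in
 * the parent directory's cloud dentry cache for CLOUD_DEVICE. Returns
 * NULL if the name is not found or allocation fails.
 */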
static struct hmdfs_lookup_cloud_ret *
hmdfs_lookup_by_cloud(struct dentry *dentry, unsigned int flags)
{
	struct hmdfs_lookup_cloud_ret *result = NULL;
	char *file_name = NULL;
	struct qstr qstr;
	int file_name_len = dentry->d_name.len;

	file_name = kzalloc(NAME_MAX + 1, GFP_KERNEL);
	if (!file_name)
		return NULL;
	strncpy(file_name, dentry->d_name.name, file_name_len);
	qstr.name = file_name;
	qstr.len = strlen(file_name);

	result = lookup_cloud_dentry(dentry, &qstr, CLOUD_DEVICE);

	kfree(file_name);
	return result;
}

/*
 * hmdfs_update_inode_size - update the inode size when an already
 * existing inode is found.
 *
 * First of all, if the file is opened for writing, we don't update the
 * inode size here, because the inode size is about to change once the
 * write completes.
 *
 * If the file is not opened, simply update getattr_isize (not the actual
 * inode size, just the value shown to the user). This is safe because the
 * inode size will be up to date after open.
 *
 * If the file is opened for read:
 * a. getattr_isize == HMDFS_STALE_REMOTE_ISIZE
 *   1) i_size == new_size, nothing needs to be done.
 *   2) i_size > new_size, we keep the i_size and set getattr_isize to
 *      new_size. Stale data might be read in this case, which is fine
 *      because the file was opened before the remote end truncated it.
 *   3) i_size < new_size, we drop the last page of the file if i_size is
 *      not aligned to PAGE_SIZE, clear getattr_isize, and update i_size
 *      to new_size.
 * b. getattr_isize != HMDFS_STALE_REMOTE_ISIZE, getattr_isize will only
 *    be set after 2).
 *   4) getattr_isize > i_size, this situation is impossible.
 *   5) i_size >= new_size, this case is the same as 2).
 *   6) i_size < new_size, this case is the same as 3).
 */
static void hmdfs_update_inode_size(struct inode *inode, uint64_t new_size)
{
	struct hmdfs_inode_info *info = hmdfs_i(inode);
	int writecount;
	uint64_t size;

	inode_lock(inode);
	size = info->getattr_isize;
	if (size == HMDFS_STALE_REMOTE_ISIZE)
		size = i_size_read(inode);
	if (size == new_size) {
		inode_unlock(inode);
		return;
	}

	writecount = atomic_read(&inode->i_writecount);
	/* check if writing is in progress */
	if (writecount > 0) {
		info->getattr_isize = HMDFS_STALE_REMOTE_ISIZE;
		inode_unlock(inode);
		return;
	}

	/* check if no one has the file open */
	if (kref_read(&info->ref) == 0)
		goto update_info;

	/* check if someone has the file open for read */
	if (writecount == 0) {
		uint64_t aligned_size;

		/* use the inode size here instead of getattr_isize */
		size = i_size_read(inode);
		if (new_size <= size)
			goto update_info;
		/*
		 * If the old inode size is not aligned to HMDFS_PAGE_SIZE, we
		 * need to drop the last page of the inode, otherwise zeros
		 * will be returned while reading the new range in that page
		 * after changing the inode size.
		 */
		aligned_size = round_down(size, HMDFS_PAGE_SIZE);
		if (aligned_size != size)
			truncate_inode_pages(inode->i_mapping, aligned_size);
		i_size_write(inode, new_size);
		info->getattr_isize = HMDFS_STALE_REMOTE_ISIZE;
		inode_unlock(inode);
		return;
	}

update_info:
	info->getattr_isize = new_size;
	inode_unlock(inode);
}

static void hmdfs_update_inode(struct inode *inode,
			       struct hmdfs_lookup_cloud_ret *lookup_result)
{
	struct hmdfs_time_t remote_mtime = {
		.tv_sec = lookup_result->i_mtime,
		.tv_nsec = 0,
	};

	/*
	 * We only update mtime if the file is not opened for writing. If we
	 * updated it right before a write starts, the user might see the
	 * mtime bounce up and down when the system time on the server and
	 * the client do not match. The mtime on the client will eventually
	 * match the server after a timeout without writing.
	 */
	if (!inode_is_open_for_write(inode))
		inode->i_mtime = remote_mtime;

	/*
	 * We don't care about the i_size of a dir, and locking the inode of
	 * a dir might cause a deadlock.
	 */
	if (S_ISREG(inode->i_mode))
		hmdfs_update_inode_size(inode, lookup_result->i_size);
}

static void hmdfs_fill_inode_permission(struct inode *inode, struct inode *dir,
					umode_t mode)
{
#ifdef CONFIG_HMDFS_FS_PERMISSION
	inode->i_uid = dir->i_uid;
	inode->i_gid = dir->i_gid;
#endif
}
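
/*
 * Shared file-scope peer passed to hmdfs_iget5_locked_cloud(); its
 * device_id is set to CLOUD_DEVICE in fill_inode_cloud() before each
 * inode lookup.
 */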
struct hmdfs_peer peer;

struct inode *fill_inode_cloud(struct super_block *sb,
			       struct hmdfs_lookup_cloud_ret *res,
			       struct inode *dir)
{
	int ret = 0;
	struct inode *inode = NULL;
	struct hmdfs_inode_info *info;
	umode_t mode = res->i_mode;

	peer.device_id = CLOUD_DEVICE;

	inode = hmdfs_iget5_locked_cloud(sb, &peer, res);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	info = hmdfs_i(inode);
	info->inode_type = HMDFS_LAYER_OTHER_CLOUD;
	/* the inode was found in the cache */
	if (!(inode->i_state & I_NEW)) {
		hmdfs_fill_inode_permission(inode, dir, mode);
		hmdfs_update_inode(inode, res);
		return inode;
	}

	inode->__i_ctime.tv_sec = 0;
	inode->__i_ctime.tv_nsec = 0;
	inode->i_mtime.tv_sec = res->i_mtime;
	inode->i_mtime.tv_nsec = 0;

	inode->i_uid = USER_DATA_RW_UID;
	inode->i_gid = USER_DATA_RW_GID;

	if (S_ISDIR(mode))
		inode->i_mode = S_IFDIR | S_IRWXU | S_IRWXG | S_IXOTH;
	else if (S_ISREG(mode))
		inode->i_mode = S_IFREG | S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP;
	else {
		ret = -EIO;
		goto bad_inode;
	}

	if (S_ISREG(mode)) {
		inode->i_op = &hmdfs_dev_file_iops_cloud;
		inode->i_fop = &hmdfs_dev_file_fops_cloud;
		inode->i_size = res->i_size;
		set_nlink(inode, 1);
	} else if (S_ISDIR(mode)) {
		inode->i_op = &hmdfs_dev_dir_inode_ops_cloud;
		inode->i_fop = &hmdfs_dev_dir_ops_cloud;
		set_nlink(inode, 2);
	} else {
		ret = -EIO;
		goto bad_inode;
	}

	inode->i_mapping->a_ops = &hmdfs_dev_file_aops_cloud;

	hmdfs_fill_inode_permission(inode, dir, mode);
	unlock_new_inode(inode);
	return inode;
bad_inode:
	iget_failed(inode);
	return ERR_PTR(ret);
}

static struct dentry *hmdfs_lookup_cloud_dentry(struct inode *parent_inode,
						struct dentry *child_dentry,
						int flags)
{
	struct dentry *ret = NULL;
	struct inode *inode = NULL;
	struct super_block *sb = parent_inode->i_sb;
	struct hmdfs_lookup_cloud_ret *lookup_result = NULL;
	struct hmdfs_dentry_info *gdi = hmdfs_d(child_dentry);

	lookup_result = hmdfs_lookup_by_cloud(child_dentry, flags);
	if (lookup_result != NULL) {
		if (in_share_dir(child_dentry))
			gdi->file_type = HM_SHARE;
		inode = fill_inode_cloud(sb, lookup_result, parent_inode);
		if (IS_ERR(inode)) {
			ret = ERR_CAST(inode);
			goto out;
		}

		check_and_fixup_ownership_remote(parent_inode,
						 inode,
						 child_dentry);
		ret = d_splice_alias(inode, child_dentry);
		if (!IS_ERR_OR_NULL(ret))
			child_dentry = ret;
	} else {
		ret = ERR_PTR(-ENOENT);
	}
out:
	kfree(lookup_result);
	return ret;
}

struct dentry *hmdfs_lookup_cloud(struct inode *parent_inode,
				  struct dentry *child_dentry,
				  unsigned int flags)
{
	int err = 0;
	struct dentry *ret = NULL;
	struct hmdfs_dentry_info *gdi = NULL;
	struct hmdfs_sb_info *sbi = hmdfs_sb(child_dentry->d_sb);

	trace_hmdfs_lookup_remote(parent_inode, child_dentry, flags);
	if (child_dentry->d_name.len > NAME_MAX) {
		err = -ENAMETOOLONG;
		ret = ERR_PTR(-ENAMETOOLONG);
		goto out;
	}

	err = init_hmdfs_dentry_info(sbi, child_dentry,
				     HMDFS_LAYER_OTHER_CLOUD);
	if (err) {
		ret = ERR_PTR(err);
		goto out;
	}
	gdi = hmdfs_d(child_dentry);
	gdi->device_id = hmdfs_d(child_dentry->d_parent)->device_id;

	ret = hmdfs_lookup_cloud_dentry(parent_inode, child_dentry, flags);
	/*
	 * Don't return an error if the inode does not exist, so that the VFS
	 * can go on to create it.
	 */
	if (IS_ERR_OR_NULL(ret)) {
		err = PTR_ERR(ret);
		if (err == -ENOENT)
			ret = NULL;
	} else {
		child_dentry = ret;
	}

out:
	if (!err)
		hmdfs_set_time(child_dentry, jiffies);
	trace_hmdfs_lookup_remote_end(parent_inode, child_dentry, err);
	return ret;
}
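
/*
 * Modifying the cloud directory tree is not supported here: mkdir,
 * create, rmdir and rename all return -EPERM, while unlink and dir
 * setattr are accepted as no-ops.
 */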
int hmdfs_mkdir_cloud(struct mnt_idmap *idmap, struct inode *dir,
		      struct dentry *dentry, umode_t mode)
{
	return -EPERM;
}

int hmdfs_create_cloud(struct mnt_idmap *idmap, struct inode *dir,
		       struct dentry *dentry, umode_t mode, bool want_excl)
{
	return -EPERM;
}

int hmdfs_rmdir_cloud(struct inode *dir, struct dentry *dentry)
{
	return -EPERM;
}

int hmdfs_unlink_cloud(struct inode *dir, struct dentry *dentry)
{
	return 0;
}

int hmdfs_rename_cloud(struct mnt_idmap *idmap, struct inode *old_dir,
		       struct dentry *old_dentry, struct inode *new_dir,
		       struct dentry *new_dentry, unsigned int flags)
{
	return -EPERM;
}

static int hmdfs_dir_setattr_cloud(struct mnt_idmap *idmap,
				   struct dentry *dentry, struct iattr *ia)
{
	/* dir setattr is not supported */
	return 0;
}

const struct inode_operations hmdfs_dev_dir_inode_ops_cloud = {
	.lookup = hmdfs_lookup_cloud,
	.mkdir = hmdfs_mkdir_cloud,
	.create = hmdfs_create_cloud,
	.rmdir = hmdfs_rmdir_cloud,
	.unlink = hmdfs_unlink_cloud,
	.rename = hmdfs_rename_cloud,
	.setattr = hmdfs_dir_setattr_cloud,
	.permission = hmdfs_permission,
};

static int hmdfs_setattr_cloud(struct mnt_idmap *idmap, struct dentry *dentry,
			       struct iattr *ia)
{
	struct hmdfs_inode_info *info = hmdfs_i(d_inode(dentry));
	struct inode *inode = d_inode(dentry);
	int err = 0;

	if (hmdfs_inode_is_stashing(info))
		return -EAGAIN;

	if (ia->ia_valid & ATTR_SIZE) {
		err = inode_newsize_ok(inode, ia->ia_size);
		if (err)
			return err;
		truncate_setsize(inode, ia->ia_size);
		info->getattr_isize = HMDFS_STALE_REMOTE_ISIZE;
	}
	if (ia->ia_valid & ATTR_MTIME)
		inode->i_mtime = ia->ia_mtime;

	return err;
}

static int hmdfs_get_cached_attr_cloud(struct mnt_idmap *idmap,
				       const struct path *path,
				       struct kstat *stat, u32 request_mask,
				       unsigned int flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct hmdfs_inode_info *info = hmdfs_i(inode);
	uint64_t size = info->getattr_isize;

	stat->ino = inode->i_ino;
	stat->mtime = inode->i_mtime;
	stat->mode = inode->i_mode;
	stat->uid.val = inode->i_uid.val;
	stat->gid.val = inode->i_gid.val;
	if (size == HMDFS_STALE_REMOTE_ISIZE)
		size = i_size_read(inode);

	stat->size = size;
	return 0;
}

const struct inode_operations hmdfs_dev_file_iops_cloud = {
	.setattr = hmdfs_setattr_cloud,
	.permission = hmdfs_permission,
	.getattr = hmdfs_get_cached_attr_cloud,
	.listxattr = NULL,
};