Lines Matching refs:info
142 struct shmem_inode_info *info, pgoff_t index);
218 struct shmem_inode_info *info = SHMEM_I(inode);
221 if (shmem_acct_block(info->flags, pages))
234 shmem_unacct_blocks(info->flags, pages);
240 struct shmem_inode_info *info = SHMEM_I(inode);
245 shmem_unacct_blocks(info->flags, pages);
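
The lines around 218-245 come from the per-inode block accounting helpers: a charge is attempted against the mount's block limit before pages are allocated, and undone with shmem_unacct_blocks() if anything later fails. A minimal user-space sketch of that charge/rollback pattern follows; the limit, counter, and function names are invented for illustration, not the kernel's.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the superblock's block limit and used counter. */
static long max_blocks = 4;
static long used_blocks;

static bool acct_blocks(long pages)          /* cf. shmem_acct_block() */
{
        if (used_blocks + pages > max_blocks)
                return false;                /* over the tmpfs size= limit */
        used_blocks += pages;
        return true;
}

static void unacct_blocks(long pages)        /* cf. shmem_unacct_blocks() */
{
        used_blocks -= pages;
}

int main(void)
{
        if (!acct_blocks(2) || !acct_blocks(3))
                printf("charge refused, used=%ld of %ld\n", used_blocks, max_blocks);

        unacct_blocks(2);                    /* roll back the earlier charge on failure */
        return 0;
}
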
357 * But normally info->alloced == inode->i_mapping->nrpages + info->swapped
358 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
364 struct shmem_inode_info *info = SHMEM_I(inode);
367 freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
369 info->alloced -= freed;
377 struct shmem_inode_info *info = SHMEM_I(inode);
386 spin_lock_irqsave(&info->lock, flags);
387 info->alloced += pages;
390 spin_unlock_irqrestore(&info->lock, flags);
397 struct shmem_inode_info *info = SHMEM_I(inode);
402 spin_lock_irqsave(&info->lock, flags);
403 info->alloced -= pages;
406 spin_unlock_irqrestore(&info->lock, flags);
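
Lines 357-369 spell out the bookkeeping identity behind the recalculation: normally info->alloced == inode->i_mapping->nrpages + info->swapped, so any excess is memory freed behind the inode's back that can be returned. Lines 386-406 show the related counters only ever being adjusted under info->lock with interrupts off. A small worked sketch of the arithmetic, with invented values:

#include <assert.h>
#include <stdio.h>

int main(void)
{
        /* Illustrative counters mirroring info->alloced, info->swapped
         * and inode->i_mapping->nrpages. */
        long alloced = 10, swapped = 3, nrpages = 5;

        /* freed = info->alloced - (nrpages + info->swapped), cf. line 367 */
        long freed = alloced - swapped - nrpages;
        if (freed > 0)
                alloced -= freed;            /* cf. line 369 */

        assert(alloced == nrpages + swapped);   /* identity restored */
        printf("reclaimed %ld pages, alloced now %ld\n", freed, alloced);
        return 0;
}
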
527 struct shmem_inode_info *info;
537 info = list_entry(pos, struct shmem_inode_info, shrinklist);
540 inode = igrab(&info->vfs_inode);
544 list_del_init(&info->shrinklist);
551 list_move(&info->shrinklist, &to_remove);
555 list_move(&info->shrinklist, &list);
564 info = list_entry(pos, struct shmem_inode_info, shrinklist);
565 inode = &info->vfs_inode;
566 list_del_init(&info->shrinklist);
573 info = list_entry(pos, struct shmem_inode_info, shrinklist);
574 inode = &info->vfs_inode;
612 list_del_init(&info->shrinklist);
622 list_move(&info->shrinklist, &sbinfo->shrinklist);
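
Lines 527-622 are from the huge-page shrinker: entries are pinned with igrab(), taken off sbinfo->shrinklist under a lock onto private "to_remove" and working lists, and only then processed with the lock dropped. A user-space analogue of that detach-then-process pattern, using a toy singly linked list and a pthread mutex; every name below is invented for the sketch.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { int id; struct node *next; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *shrinklist;              /* shared, protected by list_lock */

static void add_node(int id)
{
        struct node *n = malloc(sizeof(*n));
        n->id = id;
        pthread_mutex_lock(&list_lock);
        n->next = shrinklist;
        shrinklist = n;
        pthread_mutex_unlock(&list_lock);
}

int main(void)
{
        add_node(1); add_node(2); add_node(3);

        /* Detach the whole list while holding the lock... */
        pthread_mutex_lock(&list_lock);
        struct node *work = shrinklist;
        shrinklist = NULL;
        pthread_mutex_unlock(&list_lock);

        /* ...then do the expensive per-entry work without holding it. */
        while (work) {
                struct node *next = work->next;
                printf("processing entry %d\n", work->id);
                free(work);
                work = next;
        }
        return 0;
}
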
820 struct shmem_inode_info *info = SHMEM_I(inode);
824 /* Be careful as we don't hold info->lock */
825 swapped = READ_ONCE(info->swapped);
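
Lines 820-825 read info->swapped with READ_ONCE() while explicitly not holding info->lock: a possibly stale but tear-free snapshot is good enough for reporting swap usage. A rough user-space counterpart using a C11 relaxed atomic load; the counter name is illustrative.

#include <stdatomic.h>
#include <stdio.h>

/* Writers update this under their own lock; readers may peek locklessly. */
static _Atomic long swapped;

int main(void)
{
        atomic_store_explicit(&swapped, 42, memory_order_relaxed);

        /* Lockless snapshot: may be stale, but never a torn value,
         * much like READ_ONCE(info->swapped) on line 825. */
        long snap = atomic_load_explicit(&swapped, memory_order_relaxed);
        printf("swapped pages (approx): %ld\n", snap);
        return 0;
}
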
908 struct shmem_inode_info *info = SHMEM_I(inode);
1055 spin_lock_irq(&info->lock);
1056 info->swapped -= nr_swaps_freed;
1058 spin_unlock_irq(&info->lock);
1072 struct shmem_inode_info *info = SHMEM_I(inode);
1075 if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
1076 spin_lock_irq(&info->lock);
1078 spin_unlock_irq(&info->lock);
1091 struct shmem_inode_info *info = SHMEM_I(inode);
1104 if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
1105 (newsize > oldsize && (info->seals & F_SEAL_GROW)))
1121 if (info->alloced)
1139 if (list_empty_careful(&info->shrinklist)) {
1140 list_add_tail(&info->shrinklist,
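
Lines 1104-1105 (and again around 2526-2530 and 2791-2823) are the enforcement side of memfd sealing: depending on which F_SEAL_* bits are set in info->seals, a sealed shmem file refuses to shrink, grow, or be written. The user-visible half of that contract can be exercised from user space; a small example with memfd_create(2) and fcntl(F_ADD_SEALS), assuming Linux, glibc with _GNU_SOURCE, and a kernel new enough for memfd sealing:

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        int fd = memfd_create("sealdemo", MFD_ALLOW_SEALING);
        if (fd < 0) { perror("memfd_create"); return 1; }

        if (ftruncate(fd, 4096) < 0) { perror("ftruncate"); return 1; }

        /* Forbid growing the file; shmem_setattr() will now reject it. */
        if (fcntl(fd, F_ADD_SEALS, F_SEAL_GROW) < 0) { perror("F_ADD_SEALS"); return 1; }

        if (ftruncate(fd, 8192) < 0)
                printf("grow rejected as expected: %s\n", strerror(errno));

        close(fd);
        return 0;
}
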
1157 struct shmem_inode_info *info = SHMEM_I(inode);
1161 shmem_unacct_size(info->flags, inode->i_size);
1164 if (!list_empty(&info->shrinklist)) {
1166 if (!list_empty(&info->shrinklist)) {
1167 list_del_init(&info->shrinklist);
1172 while (!list_empty(&info->swaplist)) {
1174 wait_var_event(&info->stop_eviction,
1175 !atomic_read(&info->stop_eviction));
1178 if (!atomic_read(&info->stop_eviction))
1179 list_del_init(&info->swaplist);
1184 simple_xattrs_free(&info->xattrs);
1322 struct shmem_inode_info *info, *next;
1329 list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) {
1330 if (!info->swapped) {
1331 list_del_init(&info->swaplist);
1340 atomic_inc(&info->stop_eviction);
1343 error = shmem_unuse_inode(&info->vfs_inode, type, frontswap,
1348 next = list_next_entry(info, swaplist);
1349 if (!info->swapped)
1350 list_del_init(&info->swaplist);
1351 if (atomic_dec_and_test(&info->stop_eviction))
1352 wake_up_var(&info->stop_eviction);
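
Lines 1172-1179 and 1322-1352 are the two halves of the stop_eviction handshake: swapoff (shmem_unuse) holds a reference via atomic_inc(&info->stop_eviction) while it works on an inode, and eviction waits with wait_var_event() until the count drops to zero and wake_up_var() fires. A condensed user-space analogue, with a C11 atomic counter and a pthread condition variable standing in for wait_var_event()/wake_up_var(); all names are invented.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static _Atomic int stop_eviction;            /* cf. info->stop_eviction */
static pthread_mutex_t m  = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cv = PTHREAD_COND_INITIALIZER;

static void *swapoff_worker(void *arg)
{
        atomic_fetch_add(&stop_eviction, 1); /* pin the inode against eviction */
        usleep(100000);                      /* ... unuse the inode's swap entries ... */
        if (atomic_fetch_sub(&stop_eviction, 1) == 1) {
                pthread_mutex_lock(&m);      /* last holder: wake the evictor */
                pthread_cond_broadcast(&cv);
                pthread_mutex_unlock(&m);
        }
        return NULL;
}

int main(void)
{
        pthread_t t;
        pthread_create(&t, NULL, swapoff_worker, NULL);
        usleep(10000);                       /* let the worker take its reference */

        /* Eviction side: wait until no swapoff user is left, cf. wait_var_event(). */
        pthread_mutex_lock(&m);
        while (atomic_load(&stop_eviction) != 0)
                pthread_cond_wait(&cv, &m);
        pthread_mutex_unlock(&m);

        printf("safe to evict\n");
        pthread_join(t, NULL);
        return 0;
}
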
1366 struct shmem_inode_info *info;
1377 info = SHMEM_I(inode);
1378 if (info->flags & VM_LOCKED)
1440 if (list_empty(&info->swaplist))
1441 list_add(&info->swaplist, &shmem_swaplist);
1446 spin_lock_irq(&info->lock);
1448 info->swapped++;
1449 spin_unlock_irq(&info->lock);
1508 struct shmem_inode_info *info, pgoff_t index)
1513 vma->vm_pgoff = index + info->vfs_inode.i_ino;
1514 vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
1524 struct shmem_inode_info *info, pgoff_t index)
1530 shmem_pseudo_vma_init(&pvma, info, index);
1540 struct shmem_inode_info *info, pgoff_t index)
1543 struct address_space *mapping = info->vfs_inode.i_mapping;
1552 shmem_pseudo_vma_init(&pvma, info, hindex);
1564 struct shmem_inode_info *info, pgoff_t index)
1569 shmem_pseudo_vma_init(&pvma, info, index);
1580 struct shmem_inode_info *info = SHMEM_I(inode);
1593 page = shmem_alloc_hugepage(gfp, info, index);
1595 page = shmem_alloc_page(gfp, info, index);
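
Lines 1508-1595 show the pseudo-vma trick: shmem_pseudo_vma_init() fills a throwaway vm_area_struct on the stack (vm_pgoff keyed by index + i_ino, vm_policy looked up from the inode's shared policy) so the generic allocation helpers can serve both the base-page and huge-page paths. A stripped-down user-space analogue of "stack-built context handed to a shared helper"; every name below is made up for the sketch.

#include <stdio.h>

/* Throwaway context, analogous to the on-stack pseudo vm_area_struct. */
struct alloc_ctx {
        unsigned long pgoff;   /* cf. vma->vm_pgoff = index + i_ino   */
        const char  *policy;   /* cf. vma->vm_policy from the inode   */
};

/* Shared helper used by both the small-page and huge-page paths. */
static void alloc_with_ctx(const struct alloc_ctx *ctx, const char *what)
{
        printf("alloc %s at pgoff %lu with policy %s\n",
               what, ctx->pgoff, ctx->policy);
}

int main(void)
{
        unsigned long ino = 1234, index = 7;
        struct alloc_ctx ctx = {
                .pgoff  = index + ino,       /* cf. line 1513 */
                .policy = "interleave",
        };

        alloc_with_ctx(&ctx, "base page");   /* cf. shmem_alloc_page()     */
        alloc_with_ctx(&ctx, "huge page");   /* cf. shmem_alloc_hugepage() */
        return 0;
}
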
1626 struct shmem_inode_info *info, pgoff_t index)
1644 newpage = shmem_alloc_page(gfp, info, index);
1704 struct shmem_inode_info *info = SHMEM_I(inode);
1724 page = shmem_swapin(swap, gfp, info, index);
1751 error = shmem_replace_page(&page, gfp, info, index);
1762 spin_lock_irq(&info->lock);
1763 info->swapped--;
1765 spin_unlock_irq(&info->lock);
1804 struct shmem_inode_info *info = SHMEM_I(inode);
1939 spin_lock_irq(&info->lock);
1940 info->alloced += compound_nr(page);
1943 spin_unlock_irq(&info->lock);
1958 if (list_empty_careful(&info->shrinklist)) {
1959 list_add_tail(&info->shrinklist,
1993 spin_lock_irq(&info->lock);
1995 spin_unlock_irq(&info->lock);
2021 spin_lock_irq(&info->lock);
2023 spin_unlock_irq(&info->lock);
2236 struct shmem_inode_info *info = SHMEM_I(inode);
2240 * What serializes the accesses to info->flags?
2244 if (lock && !(info->flags & VM_LOCKED)) {
2247 info->flags |= VM_LOCKED;
2250 if (!lock && (info->flags & VM_LOCKED) && user) {
2252 info->flags &= ~VM_LOCKED;
2263 struct shmem_inode_info *info = SHMEM_I(file_inode(file));
2266 ret = seal_check_future_write(info->seals, vma);
2287 struct shmem_inode_info *info;
2301 info = SHMEM_I(inode);
2302 memset(info, 0, (char *)inode - (char *)info);
2303 spin_lock_init(&info->lock);
2304 atomic_set(&info->stop_eviction, 0);
2305 info->seals = F_SEAL_SEAL;
2306 info->flags = flags & VM_NORESERVE;
2307 INIT_LIST_HEAD(&info->shrinklist);
2308 INIT_LIST_HEAD(&info->swaplist);
2309 simple_xattrs_init(&info->xattrs);
2321 mpol_shared_policy_init(&info->policy,
2336 mpol_shared_policy_init(&info->policy, NULL);
2360 struct shmem_inode_info *info = SHMEM_I(inode);
2386 page = shmem_alloc_page(gfp, info, pgoff);
2455 spin_lock_irq(&info->lock);
2456 info->alloced++;
2459 spin_unlock_irq(&info->lock);
2522 struct shmem_inode_info *info = SHMEM_I(inode);
2526 if (unlikely(info->seals & (F_SEAL_GROW |
2528 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
2530 if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
2774 struct shmem_inode_info *info = SHMEM_I(inode);
2791 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
2823 if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
3242 struct shmem_inode_info *info = SHMEM_I(inode);
3265 simple_xattr_list_add(&info->xattrs, new_xattr);
3275 struct shmem_inode_info *info = SHMEM_I(inode);
3278 return simple_xattr_get(&info->xattrs, name, buffer, size);
3286 struct shmem_inode_info *info = SHMEM_I(inode);
3289 return simple_xattr_set(&info->xattrs, name, value, size, flags, NULL);
3316 struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
3317 return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
3851 struct shmem_inode_info *info;
3852 info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
3853 if (!info)
3855 return &info->vfs_inode;
3873 struct shmem_inode_info *info = foo;
3874 inode_init_once(&info->vfs_inode);
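
The lines at 2301-2302 and 3851-3855 show the two directions of the same embedding: shmem_alloc_inode() hands the VFS the vfs_inode field embedded inside a shmem_inode_info, and SHMEM_I() later recovers the containing structure from the inode pointer (the usual container_of idiom). A self-contained user-space illustration of that round trip; the struct and function names are placeholders.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Generic part, analogous to struct inode. */
struct vfs_inode_like { long i_size; };

/* Filesystem-private wrapper, analogous to struct shmem_inode_info. */
struct info_like {
        int flags;
        struct vfs_inode_like vfs_inode;     /* embedded, handed to the "VFS" */
};

/* container_of: recover the wrapper from a pointer to the embedded member. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static struct info_like *INFO_I(struct vfs_inode_like *inode)   /* cf. SHMEM_I() */
{
        return container_of(inode, struct info_like, vfs_inode);
}

int main(void)
{
        struct info_like *info = calloc(1, sizeof(*info));       /* cf. shmem_alloc_inode() */
        info->flags = 0x2000;

        struct vfs_inode_like *inode = &info->vfs_inode;          /* cf. line 3855 */
        printf("flags via SHMEM_I-style lookup: %#x\n", INFO_I(inode)->flags);

        free(info);
        return 0;
}
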