Lines Matching refs:dmap
64 /* Semaphore to protect modifications to the dmap tree */
131 struct fuse_dax_mapping *dmap;
134 dmap = list_first_entry_or_null(&fcd->free_ranges,
136 if (dmap) {
137 list_del_init(&dmap->list);
144 return dmap;
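Read together, the matches at 131-144 are the free-range allocator: the first entry is popped off fcd->free_ranges and detached from the list. A condensed sketch of that path, assuming the per-connection spinlock is named fcd->lock (the lock is not visible in the matches above):

static struct fuse_dax_mapping *alloc_dax_mapping(struct fuse_conn_dax *fcd)
{
        struct fuse_dax_mapping *dmap;

        /* Pop the first free DAX window range, if any */
        spin_lock(&fcd->lock);
        dmap = list_first_entry_or_null(&fcd->free_ranges,
                                        struct fuse_dax_mapping, list);
        if (dmap)
                list_del_init(&dmap->list);
        spin_unlock(&fcd->lock);

        return dmap;
}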
149 struct fuse_dax_mapping *dmap)
151 list_del_init(&dmap->busy_list);
157 struct fuse_dax_mapping *dmap)
160 __dmap_remove_busy_list(fcd, dmap);
166 struct fuse_dax_mapping *dmap)
168 list_add_tail(&dmap->list, &fcd->free_ranges);
174 struct fuse_dax_mapping *dmap)
178 __dmap_add_to_free_pool(fcd, dmap);
183 struct fuse_dax_mapping *dmap, bool writable,
200 inarg.moffset = dmap->window_offset;
213 dmap->writable = writable;
218 * cleanup dmap entries.
220 dmap->inode = inode;
221 dmap->itn.start = dmap->itn.last = start_idx;
223 interval_tree_insert(&dmap->itn, &fi->dax->tree);
226 list_add_tail(&dmap->busy_list, &fcd->busy_ranges);
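The matches at 183-226 are the setup side: a FUSE_SETUPMAPPING request is sent for the window at dmap->window_offset, and only on success is the dmap bound to the inode, keyed by start_idx in the per-inode interval tree, and queued on fcd->busy_ranges. A condensed sketch of that success path; the request plumbing and the locking around the tree and busy list are assumptions:

        /* Ask the FUSE server to map this DAX window range */
        inarg.moffset = dmap->window_offset;
        err = fuse_simple_request(fm, &args);   /* FUSE_SETUPMAPPING */
        if (err < 0)
                return err;

        /* Publish the mapping: bind to inode, index by start_idx, mark busy */
        dmap->writable = writable;
        dmap->inode = inode;
        dmap->itn.start = dmap->itn.last = start_idx;
        interval_tree_insert(&dmap->itn, &fi->dax->tree);

        spin_lock(&fcd->lock);
        list_add_tail(&dmap->busy_list, &fcd->busy_ranges);
        spin_unlock(&fcd->lock);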
256 struct fuse_dax_mapping *dmap;
265 list_for_each_entry(dmap, to_remove, list) {
266 ptr->moffset = dmap->window_offset;
267 ptr->len = dmap->length;
288 * Cleanup dmap entry and add back to free list. This should be called with
292 struct fuse_dax_mapping *dmap)
295 dmap->itn.start, dmap->itn.last, dmap->window_offset,
296 dmap->length);
297 __dmap_remove_busy_list(fcd, dmap);
298 dmap->inode = NULL;
299 dmap->itn.start = dmap->itn.last = 0;
300 __dmap_add_to_free_pool(fcd, dmap);
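The helper at 292-300 is the inverse of setup: the range is dropped from the busy list, stripped of its inode binding and interval-tree keys, and returned to the free pool. Per the comment at 288 it runs with the fcd lock already held, so a sketch (debug print omitted) is simply:

static void dmap_reinit_add_to_free_pool(struct fuse_conn_dax *fcd,
                                         struct fuse_dax_mapping *dmap)
{
        /* Caller holds the fcd lock */
        __dmap_remove_busy_list(fcd, dmap);
        dmap->inode = NULL;
        dmap->itn.start = dmap->itn.last = 0;
        __dmap_add_to_free_pool(fcd, dmap);
}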
304 * Free inode dmap entries whose range falls inside [start, end].
306 * called from evict_inode() path where we know all dmap entries can be
314 struct fuse_dax_mapping *dmap, *n;
326 dmap = node_to_dmap(node);
327 /* inode is going away. There should not be any users of dmap */
328 WARN_ON(refcount_read(&dmap->refcnt) > 1);
329 interval_tree_remove(&dmap->itn, &fi->dax->tree);
331 list_add(&dmap->list, &to_remove);
346 list_for_each_entry_safe(dmap, n, &to_remove, list) {
347 list_del_init(&dmap->list);
348 dmap_reinit_add_to_free_pool(fcd, dmap);
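Lines 314-348 free an inode's ranges in two phases: first walk the interval tree and move every entry onto a local to_remove list (warning if anything still holds a reference, since the inode is going away), then drain that list back into the free pool. A sketch of the collection phase, with the iteration bounds start_idx/end_idx assumed from the surrounding code:

        node = interval_tree_iter_first(&fi->dax->tree, start_idx, end_idx);
        while (node) {
                dmap = node_to_dmap(node);
                /* inode is going away; nobody should still be using this dmap */
                WARN_ON(refcount_read(&dmap->refcnt) > 1);
                interval_tree_remove(&dmap->itn, &fi->dax->tree);
                list_add(&dmap->list, &to_remove);
                node = interval_tree_iter_first(&fi->dax->tree, start_idx, end_idx);
        }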
354 struct fuse_dax_mapping *dmap)
362 forget_one.moffset = dmap->window_offset;
363 forget_one.len = dmap->length;
396 struct iomap *iomap, struct fuse_dax_mapping *dmap,
402 offset = pos - (dmap->itn.start << FUSE_DAX_SHIFT);
403 len = min(length, dmap->length - offset);
410 iomap->addr = dmap->window_offset + offset;
416 * increase refcnt so that reclaim code knows this dmap is in
420 refcount_inc(&dmap->refcnt);
424 iomap->private = dmap;
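The matches at 396-424 translate a file position into the DAX window: the offset within the FUSE_DAX_SHIFT-sized range comes from dmap->itn.start, the mapped address is dmap->window_offset plus that offset, and a reference is taken so reclaim leaves the range alone until ->iomap_end(). A condensed sketch; the iomap type and end-of-file length handling are simplified assumptions:

        /* Byte offset of pos within this dmap's range */
        offset = pos - (dmap->itn.start << FUSE_DAX_SHIFT);
        len = min(length, dmap->length - offset);

        iomap->addr = dmap->window_offset + offset;
        iomap->length = len;
        iomap->type = IOMAP_MAPPED;

        /* Pin the range so reclaim knows it is in use until ->iomap_end() */
        refcount_inc(&dmap->refcnt);
        iomap->private = dmap;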
438 struct fuse_dax_mapping *dmap, *alloc_dmap = NULL;
479 dmap = node_to_dmap(node);
480 fuse_fill_iomap(inode, pos, length, iomap, dmap, flags);
504 struct fuse_dax_mapping *dmap;
517 * ensure that dmap can't be truncated. We are holding a reference
518 * on dmap and that should make sure it can't be reclaimed. So dmap
526 dmap = node_to_dmap(node);
528 /* We took an extra reference on dmap to make sure it's not reclaimed.
532 if (refcount_dec_and_test(&dmap->refcnt)) {
542 if (dmap->writable) {
547 ret = fuse_setup_one_mapping(inode, pos >> FUSE_DAX_SHIFT, dmap, true,
552 fuse_fill_iomap(inode, pos, length, iomap, dmap, flags);
567 struct fuse_dax_mapping *dmap;
591 dmap = node_to_dmap(node);
592 if (writable && !dmap->writable) {
596 * for same dmap. So drop shared lock and acquire
600 * on dmap so that its not freed by range reclaim.
602 refcount_inc(&dmap->refcnt);
609 fuse_fill_iomap(inode, pos, length, iomap, dmap, flags);
639 struct fuse_dax_mapping *dmap = iomap->private;
641 if (dmap) {
642 if (refcount_dec_and_test(&dmap->refcnt)) {
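Lines 639-642 are the matching release in ->iomap_end(): the reference taken by fuse_fill_iomap() is dropped, and when it was the last one, any reclaimer waiting for the range can proceed. A sketch; the wait queue name is an assumption:

        struct fuse_dax_mapping *dmap = iomap->private;

        if (dmap) {
                /* Drop the pin taken in fuse_fill_iomap() */
                if (refcount_dec_and_test(&dmap->refcnt))
                        wake_up(&fcd->range_waitq);     /* assumed wait queue */
        }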
866 struct fuse_dax_mapping *dmap)
869 loff_t start_pos = dmap->itn.start << FUSE_DAX_SHIFT;
890 struct fuse_dax_mapping *dmap)
899 ret = dmap_writeback_invalidate(inode, dmap);
904 interval_tree_remove(&dmap->itn, &fi->dax->tree);
911 ret = dmap_removemapping_one(inode, dmap);
914 dmap->window_offset, dmap->length, ret);
919 /* Find first mapped dmap for an inode and return file offset. Caller needs
925 struct fuse_dax_mapping *dmap;
930 dmap = node_to_dmap(node);
932 if (refcount_read(&dmap->refcnt) > 1)
935 return dmap;
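Lines 919-935 pick a reclaim victim: walk the inode's interval tree and return the first range whose refcount shows no in-flight iomap user. A sketch of the loop (the full-range iteration bounds are an assumption):

        for (node = interval_tree_iter_first(&fi->dax->tree, 0, -1); node;
             node = interval_tree_iter_next(node, 0, -1)) {
                dmap = node_to_dmap(node);
                /* Skip ranges still pinned by an in-flight iomap operation */
                if (refcount_read(&dmap->refcnt) > 1)
                        continue;
                return dmap;
        }
        return NULL;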
950 struct fuse_dax_mapping *dmap;
958 /* Lookup a dmap and corresponding file offset to reclaim. */
960 dmap = inode_lookup_first_dmap(inode);
961 if (dmap) {
962 start_idx = dmap->itn.start;
968 if (!dmap)
978 dmap = ERR_PTR(ret);
991 dmap = node_to_dmap(node);
993 if (refcount_read(&dmap->refcnt) > 1) {
994 dmap = NULL;
1000 ret = reclaim_one_dmap_locked(inode, dmap);
1002 dmap = ERR_PTR(ret);
1006 /* Clean up dmap. Do not add back to free list */
1007 dmap_remove_busy_list(fcd, dmap);
1008 dmap->inode = NULL;
1009 dmap->itn.start = dmap->itn.last = 0;
1012 __func__, inode, dmap->window_offset, dmap->length);
1018 return dmap;
1024 struct fuse_dax_mapping *dmap;
1030 dmap = alloc_dax_mapping(fcd);
1031 if (dmap)
1032 return dmap;
1034 dmap = inode_inline_reclaim_one_dmap(fcd, inode, &retry);
1039 if (dmap)
1040 return dmap;
1075 struct fuse_dax_mapping *dmap;
1084 dmap = node_to_dmap(node);
1087 if (refcount_read(&dmap->refcnt) > 1)
1090 ret = reclaim_one_dmap_locked(inode, dmap);
1094 /* Cleanup dmap entry and add back to free list */
1096 dmap_reinit_add_to_free_pool(fcd, dmap);
1106 * read/write cannot reuse a dmap which we might be freeing.
1137 struct fuse_dax_mapping *dmap, *pos, *temp;
1147 dmap = NULL;
1174 dmap = pos;
1175 list_move_tail(&dmap->busy_list, &fcd->busy_ranges);
1176 start_idx = end_idx = dmap->itn.start;
1180 if (!dmap)
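The final group, 1137-1180, is the background chunk freer's victim selection: under the fcd lock it scans fcd->busy_ranges, skips pinned ranges, grabs the owning inode, rotates the chosen dmap to the list tail (so a failed attempt does not pick it again immediately), and records its start index for the per-inode reclaim that follows. A condensed sketch; the igrab()/iput() handling is an assumption based on the fragments above:

        spin_lock(&fcd->lock);
        list_for_each_entry_safe(pos, temp, &fcd->busy_ranges, busy_list) {
                /* Skip ranges still in use by an iomap operation */
                if (refcount_read(&pos->refcnt) > 1)
                        continue;

                /* Skip ranges whose inode is already going away */
                inode = igrab(pos->inode);
                if (!inode)
                        continue;

                dmap = pos;
                /* Rotate to the tail so the next pass tries a different range */
                list_move_tail(&dmap->busy_list, &fcd->busy_ranges);
                start_idx = end_idx = dmap->itn.start;
                break;
        }
        spin_unlock(&fcd->lock);

        if (!dmap)
                return 0;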