Lines Matching refs:dmap

63 	/* Semaphore to protect modifications to the dmap tree */
130 struct fuse_dax_mapping *dmap;
133 dmap = list_first_entry_or_null(&fcd->free_ranges,
135 if (dmap) {
136 list_del_init(&dmap->list);
143 return dmap;
148 struct fuse_dax_mapping *dmap)
150 list_del_init(&dmap->busy_list);
156 struct fuse_dax_mapping *dmap)
159 __dmap_remove_busy_list(fcd, dmap);
165 struct fuse_dax_mapping *dmap)
167 list_add_tail(&dmap->list, &fcd->free_ranges);
173 struct fuse_dax_mapping *dmap)
177 __dmap_add_to_free_pool(fcd, dmap);
182 struct fuse_dax_mapping *dmap, bool writable,
199 inarg.moffset = dmap->window_offset;
212 dmap->writable = writable;
217 * cleanup dmap entries.
219 dmap->inode = inode;
220 dmap->itn.start = dmap->itn.last = start_idx;
222 interval_tree_insert(&dmap->itn, &fi->dax->tree);
225 list_add_tail(&dmap->busy_list, &fcd->busy_ranges);
255 struct fuse_dax_mapping *dmap;
264 list_for_each_entry(dmap, to_remove, list) {
265 ptr->moffset = dmap->window_offset;
266 ptr->len = dmap->length;
287 * Cleanup dmap entry and add back to free list. This should be called with
291 struct fuse_dax_mapping *dmap)
294 dmap->itn.start, dmap->itn.last, dmap->window_offset,
295 dmap->length);
296 __dmap_remove_busy_list(fcd, dmap);
297 dmap->inode = NULL;
298 dmap->itn.start = dmap->itn.last = 0;
299 __dmap_add_to_free_pool(fcd, dmap);
303 * Free inode dmap entries whose range falls inside [start, end].
305 * called from evict_inode() path where we know all dmap entries can be
313 struct fuse_dax_mapping *dmap, *n;
325 dmap = node_to_dmap(node);
326 /* inode is going away. There should not be any users of dmap */
327 WARN_ON(refcount_read(&dmap->refcnt) > 1);
328 interval_tree_remove(&dmap->itn, &fi->dax->tree);
330 list_add(&dmap->list, &to_remove);
345 list_for_each_entry_safe(dmap, n, &to_remove, list) {
346 list_del_init(&dmap->list);
347 dmap_reinit_add_to_free_pool(fcd, dmap);
353 struct fuse_dax_mapping *dmap)
361 forget_one.moffset = dmap->window_offset;
362 forget_one.len = dmap->length;
395 struct iomap *iomap, struct fuse_dax_mapping *dmap,
401 offset = pos - (dmap->itn.start << FUSE_DAX_SHIFT);
402 len = min(length, dmap->length - offset);
409 iomap->addr = dmap->window_offset + offset;
415 * increase refcnt so that reclaim code knows this dmap is in
419 refcount_inc(&dmap->refcnt);
423 iomap->private = dmap;
437 struct fuse_dax_mapping *dmap, *alloc_dmap = NULL;
478 dmap = node_to_dmap(node);
479 fuse_fill_iomap(inode, pos, length, iomap, dmap, flags);
503 struct fuse_dax_mapping *dmap;
516 * ensure that dmap can't be truncated. We are holding a reference
517 * on dmap and that should make sure it can't be reclaimed. So dmap
525 dmap = node_to_dmap(node);
527 /* We took an extra reference on dmap to make sure it's not reclaimed.
531 if (refcount_dec_and_test(&dmap->refcnt)) {
541 if (dmap->writable) {
546 ret = fuse_setup_one_mapping(inode, pos >> FUSE_DAX_SHIFT, dmap, true,
551 fuse_fill_iomap(inode, pos, length, iomap, dmap, flags);
566 struct fuse_dax_mapping *dmap;
590 dmap = node_to_dmap(node);
591 if (writable && !dmap->writable) {
595 * for same dmap. So drop shared lock and acquire
599 * on dmap so that it's not freed by range reclaim.
601 refcount_inc(&dmap->refcnt);
608 fuse_fill_iomap(inode, pos, length, iomap, dmap, flags);
638 struct fuse_dax_mapping *dmap = iomap->private;
640 if (dmap) {
641 if (refcount_dec_and_test(&dmap->refcnt)) {
872 struct fuse_dax_mapping *dmap)
875 loff_t start_pos = dmap->itn.start << FUSE_DAX_SHIFT;
896 struct fuse_dax_mapping *dmap)
905 ret = dmap_writeback_invalidate(inode, dmap);
910 interval_tree_remove(&dmap->itn, &fi->dax->tree);
917 ret = dmap_removemapping_one(inode, dmap);
920 dmap->window_offset, dmap->length, ret);
925 /* Find first mapped dmap for an inode and return file offset. Caller needs
931 struct fuse_dax_mapping *dmap;
936 dmap = node_to_dmap(node);
938 if (refcount_read(&dmap->refcnt) > 1)
941 return dmap;
956 struct fuse_dax_mapping *dmap;
964 /* Lookup a dmap and corresponding file offset to reclaim. */
966 dmap = inode_lookup_first_dmap(inode);
967 if (dmap) {
968 start_idx = dmap->itn.start;
974 if (!dmap)
984 dmap = ERR_PTR(ret);
997 dmap = node_to_dmap(node);
999 if (refcount_read(&dmap->refcnt) > 1) {
1000 dmap = NULL;
1006 ret = reclaim_one_dmap_locked(inode, dmap);
1008 dmap = ERR_PTR(ret);
1012 /* Clean up dmap. Do not add back to free list */
1013 dmap_remove_busy_list(fcd, dmap);
1014 dmap->inode = NULL;
1015 dmap->itn.start = dmap->itn.last = 0;
1018 __func__, inode, dmap->window_offset, dmap->length);
1024 return dmap;
1030 struct fuse_dax_mapping *dmap;
1036 dmap = alloc_dax_mapping(fcd);
1037 if (dmap)
1038 return dmap;
1040 dmap = inode_inline_reclaim_one_dmap(fcd, inode, &retry);
1045 if (dmap)
1046 return dmap;
1081 struct fuse_dax_mapping *dmap;
1090 dmap = node_to_dmap(node);
1093 if (refcount_read(&dmap->refcnt) > 1)
1096 ret = reclaim_one_dmap_locked(inode, dmap);
1100 /* Cleanup dmap entry and add back to free list */
1102 dmap_reinit_add_to_free_pool(fcd, dmap);
1112 * read/write cannot reuse a dmap which we might be freeing.
1143 struct fuse_dax_mapping *dmap, *pos, *temp;
1153 dmap = NULL;
1180 dmap = pos;
1181 list_move_tail(&dmap->busy_list, &fcd->busy_ranges);
1182 start_idx = end_idx = dmap->itn.start;
1186 if (!dmap)
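
Taken together, the references above trace the life cycle of a struct fuse_dax_mapping: a range is popped off fcd->free_ranges (line 133), set up and inserted into the per-inode interval tree and the busy list (lines 220-225), pinned with a refcount while iomap I/O uses it (line 419), and returned to the free pool only once no extra references remain (lines 327, 938, 999, 1102). The following is a rough userspace sketch of that free-list-plus-refcount pattern, not the code in fs/fuse/dax.c; all names here (dax_pool, pool_alloc, pool_reclaim, mapping_get) are invented for illustration.

/*
 * Sketch of the pattern the listing shows: mappings sit on a free list,
 * are pinned with a reference count while I/O uses them, and may be
 * reclaimed only when the count is back at its base value (cf. the
 * refcount_read() > 1 checks at lines 327, 938 and 999 above).
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

struct mapping {
	struct mapping *next;        /* free-list linkage */
	unsigned long window_offset; /* offset into the DAX window */
	int refcnt;                  /* 1 == held by the tree only */
};

struct dax_pool {
	pthread_mutex_t lock;        /* protects the free list */
	struct mapping *free_head;
};

/* Pop the first free mapping, mirroring the list_first_entry_or_null()
 * use at line 133. Returns NULL when the pool is exhausted. */
static struct mapping *pool_alloc(struct dax_pool *p)
{
	struct mapping *m;

	pthread_mutex_lock(&p->lock);
	m = p->free_head;
	if (m) {
		p->free_head = m->next;
		m->next = NULL;
		m->refcnt = 1;       /* base reference: owned by the tree */
	}
	pthread_mutex_unlock(&p->lock);
	return m;
}

/* Pin a mapping while I/O is in flight (cf. refcount_inc() at line 419). */
static void mapping_get(struct mapping *m)
{
	__atomic_add_fetch(&m->refcnt, 1, __ATOMIC_RELAXED);
}

/* Drop an extra pin taken by mapping_get(). */
static void mapping_put(struct mapping *m)
{
	__atomic_sub_fetch(&m->refcnt, 1, __ATOMIC_RELAXED);
}

/* Try to reclaim: skip mappings that are still pinned, otherwise put the
 * range back on the free list (cf. dmap_reinit_add_to_free_pool(), line 347). */
static int pool_reclaim(struct dax_pool *p, struct mapping *m)
{
	if (__atomic_load_n(&m->refcnt, __ATOMIC_RELAXED) > 1)
		return -1;           /* still in use, leave it alone */

	pthread_mutex_lock(&p->lock);
	m->refcnt = 0;
	m->next = p->free_head;
	p->free_head = m;
	pthread_mutex_unlock(&p->lock);
	return 0;
}

int main(void)
{
	struct mapping slot = { .next = NULL, .window_offset = 0, .refcnt = 0 };
	struct dax_pool pool = { .lock = PTHREAD_MUTEX_INITIALIZER,
				 .free_head = &slot };
	struct mapping *m = pool_alloc(&pool);

	mapping_get(m);              /* simulate in-flight I/O on the range */
	printf("reclaim while pinned: %d\n", pool_reclaim(&pool, m)); /* -1 */
	mapping_put(m);
	printf("reclaim after unpin:  %d\n", pool_reclaim(&pool, m)); /*  0 */
	return 0;
}

The sketch deliberately omits what the real code layers on top: reclaim is serialized against a per-inode semaphore protecting the dmap tree (line 63), dirty pages are written back and invalidated first (line 905), and the range is handed back to the server via dmap_removemapping_one() (line 917) before it rejoins the free pool.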