Lines matching refs:dax (fs/fuse/dax.c)

3  * dax: direct host memory access
10 #include <linux/dax.h>
57 /* reference count when the mapping is used by dax iomap. */
61 /* Per-inode dax map */
85 /* Wait queue for a dax range to become free */
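
The struct-field comments at lines 57-85 above (a reference count for iomap use, a per-inode dax map, a wait queue for free ranges) suggest the shape of the dax state. Below is a minimal sketch of per-connection and per-inode structures consistent with those comments; fields visible in the matches (dev, free_ranges, nr_free_ranges, itn, sem, tree, nr) are grounded there, while the refcnt/list/range_waitq names and the struct names other than fuse_conn_dax are assumptions, not the kernel's definitions.

#include <linux/dax.h>
#include <linux/interval_tree.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/rwsem.h>
#include <linux/wait.h>

/* One dax memory range mapped into a file (refcnt/list names illustrative). */
struct fuse_dax_mapping {
        /* Node in the per-inode interval tree, keyed by file page index. */
        struct interval_tree_node itn;
        /* Entry on the per-connection free list when unassigned. */
        struct list_head list;
        /* Reference count when the mapping is used by dax iomap. */
        refcount_t refcnt;
};

/* Per-connection dax state (reached as fc->dax in the matches above). */
struct fuse_conn_dax {
        /* DAX device handed to dax_writeback_mapping_range() and friends. */
        struct dax_device *dev;
        /* Ranges not currently assigned to any inode. */
        struct list_head free_ranges;
        unsigned long nr_free_ranges;
        /* Wait queue for a dax range to become free. */
        wait_queue_head_t range_waitq;
};

/* Per-inode dax map (reached as fi->dax in the matches above). */
struct fuse_inode_dax {
        /* Serializes lookups and updates of the interval tree below. */
        struct rw_semaphore sem;
        /* Interval tree of fuse_dax_mapping, indexed by file page index. */
        struct rb_root_cached tree;
        /* Number of ranges currently mapped into this inode. */
        unsigned long nr;
};
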
186 struct fuse_conn_dax *fcd = fm->fc->dax;
221 /* Protected by fi->dax->sem */
222 interval_tree_insert(&dmap->itn, &fi->dax->tree);
223 fi->dax->nr++;
321 node = interval_tree_iter_first(&fi->dax->tree, start_idx,
328 interval_tree_remove(&dmap->itn, &fi->dax->tree);
337 WARN_ON(fi->dax->nr < num);
338 fi->dax->nr -= num;
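
The matches at lines 221-338 above insert mappings into, and remove them from, the per-inode interval tree while fi->dax->nr tracks how many ranges the inode holds. A minimal sketch of that add/remove pattern, building on the structures sketched earlier and assuming the caller already holds fi->dax->sem for writing:

#include <linux/bug.h>
#include <linux/kernel.h>

/* Caller must hold dax->sem for writing. */
static void fuse_dax_tree_add(struct fuse_inode_dax *dax,
                              struct fuse_dax_mapping *dmap)
{
        interval_tree_insert(&dmap->itn, &dax->tree);
        dax->nr++;
}

/* Detach every mapping overlapping [start_idx, end_idx]; caller holds dax->sem. */
static void fuse_dax_tree_remove_range(struct fuse_inode_dax *dax,
                                       unsigned long start_idx,
                                       unsigned long end_idx)
{
        struct interval_tree_node *node;
        unsigned long num = 0;

        while ((node = interval_tree_iter_first(&dax->tree, start_idx,
                                                end_idx))) {
                struct fuse_dax_mapping *dmap =
                        container_of(node, struct fuse_dax_mapping, itn);

                interval_tree_remove(&dmap->itn, &dax->tree);
                num++;
        }

        WARN_ON(dax->nr < num);
        dax->nr -= num;
}
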
369 * this function does not take any locks like fi->dax->sem for traversing
383 inode_reclaim_dmap_range(fc->dax, inode, 0, -1);
384 WARN_ON(fi->dax->nr);
416 * use. This assumes fi->dax->sem mutex is held either
436 struct fuse_conn_dax *fcd = fc->dax;
471 down_write(&fi->dax->sem);
476 node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);
481 up_write(&fi->dax->sem);
490 up_write(&fi->dax->sem);
494 up_write(&fi->dax->sem);
512 down_write(&fi->dax->sem);
513 node = interval_tree_iter_first(&fi->dax->tree, idx, idx);
519 * re-acquired the fi->dax->sem lock.
528 * Now we hold fi->dax->sem lock and that reference is not needed
553 up_write(&fi->dax->sem);
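
The write-locked lookups at lines 471-553 above drop fi->dax->sem while a free range is obtained, then re-check the tree after re-acquiring the lock, since another task may have set the mapping up in the meantime. A minimal sketch of that check/alloc/re-check pattern; alloc_dax_mapping() and free_dax_mapping() are hypothetical stand-ins for the range pool, not functions from the source.

/* Hypothetical pool helpers: obtain / return a free dax range. */
struct fuse_dax_mapping *alloc_dax_mapping(void);
void free_dax_mapping(struct fuse_dax_mapping *dmap);

/* Illustrative: find or create a mapping covering start_idx. */
static struct fuse_dax_mapping *
fuse_dax_find_or_create(struct fuse_inode_dax *dax, unsigned long start_idx)
{
        struct interval_tree_node *node;
        struct fuse_dax_mapping *dmap;

        down_write(&dax->sem);
        node = interval_tree_iter_first(&dax->tree, start_idx, start_idx);
        if (node) {
                /* Somebody else already set this range up. */
                dmap = container_of(node, struct fuse_dax_mapping, itn);
                up_write(&dax->sem);
                return dmap;
        }
        up_write(&dax->sem);

        dmap = alloc_dax_mapping();     /* may sleep waiting for a free range */
        if (!dmap)
                return NULL;

        down_write(&dax->sem);
        /*
         * Re-check: another task may have inserted a mapping for this
         * index while fi->dax->sem was not held.
         */
        node = interval_tree_iter_first(&dax->tree, start_idx, start_idx);
        if (node) {
                free_dax_mapping(dmap);
                dmap = container_of(node, struct fuse_dax_mapping, itn);
        } else {
                fuse_dax_tree_add(dax, dmap);
        }
        up_write(&dax->sem);

        return dmap;
}
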
578 iomap->dax_dev = fc->dax->dev;
587 down_read(&fi->dax->sem);
588 node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);
593 * require exclusive fi->dax->sem lock as we don't want
598 * Before dropping fi->dax->sem lock, take reference
602 up_read(&fi->dax->sem);
609 up_read(&fi->dax->sem);
613 up_read(&fi->dax->sem);
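
In the iomap path at lines 578-613 above, the lookup only needs fi->dax->sem held shared, the iomap is pointed at the connection's dax device, and a reference is taken on the mapping before the lock is dropped so reclaim cannot free the range while I/O uses it. A minimal sketch of that read-side lookup, assuming the refcnt field from the earlier struct sketch:

/* Illustrative read-side lookup for the iomap path. */
static struct fuse_dax_mapping *
fuse_dax_lookup_get(struct fuse_inode_dax *dax, unsigned long start_idx)
{
        struct interval_tree_node *node;
        struct fuse_dax_mapping *dmap = NULL;

        down_read(&dax->sem);
        node = interval_tree_iter_first(&dax->tree, start_idx, start_idx);
        if (node) {
                dmap = container_of(node, struct fuse_dax_mapping, itn);
                /*
                 * Take a reference before dropping fi->dax->sem so the
                 * range cannot be reclaimed while I/O is using it.
                 */
                refcount_inc(&dmap->refcnt);
        }
        up_read(&dax->sem);

        return dmap;
}
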
765 /* Do not use dax for file extending writes as write and on
788 return dax_writeback_mapping_range(mapping, fc->dax->dev, wbc);
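
Lines 765-788 above avoid dax for size-extending writes and flush dirty mappings through dax_writeback_mapping_range() against the connection's dax device. A minimal sketch of such a writepages hook; the helper name and the explicit fcd argument are illustrative, not the driver's signature.

#include <linux/fs.h>
#include <linux/writeback.h>

static int fuse_dax_writepages_sketch(struct address_space *mapping,
                                      struct writeback_control *wbc,
                                      struct fuse_conn_dax *fcd)
{
        /* Flush dirty dax pages of this mapping to the dax device. */
        return dax_writeback_mapping_range(mapping, fcd->dev, wbc);
}
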
800 struct fuse_conn_dax *fcd = fc->dax;
811 * fuse dax memory range reclaim. While a range is being reclaimed,
909 /* Remove dax mapping from inode interval tree now */
910 interval_tree_remove(&dmap->itn, &fi->dax->tree);
911 fi->dax->nr--;
926 * to hold fi->dax->sem lock either shared or exclusive.
934 for (node = interval_tree_iter_first(&fi->dax->tree, 0, -1); node;
965 down_read(&fi->dax->sem);
972 up_read(&fi->dax->sem);
988 down_write(&fi->dax->sem);
989 node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);
1021 up_write(&fi->dax->sem);
1055 * only if fi->dax->nr=0
1061 * We are not holding fi->dax->sem. So it is possible
1066 if (!fi->dax->nr && !(fcd->nr_free_ranges > 0)) {
1084 /* Find the fuse dax mapping at this file offset in the inode. */
1085 node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);
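
The reclaim matches between lines 909 and 1085 above walk an inode's interval tree, skip busy ranges, and detach a candidate under the write lock; line 1066 shows the unlocked fast-path check that gives up when the inode holds no ranges and the free pool is empty. A minimal sketch of picking and detaching one range; the busy test against refcnt and the omission of the actual unmap/invalidate work are simplifications.

/* Illustrative: pick one idle mapping from the inode and detach it. */
static struct fuse_dax_mapping *
fuse_dax_reclaim_one_sketch(struct fuse_inode_dax *dax)
{
        struct interval_tree_node *node;
        struct fuse_dax_mapping *dmap = NULL;

        down_write(&dax->sem);
        for (node = interval_tree_iter_first(&dax->tree, 0, -1); node;
             node = interval_tree_iter_next(node, 0, -1)) {
                dmap = container_of(node, struct fuse_dax_mapping, itn);
                /* Skip ranges still referenced by in-flight dax I/O. */
                if (refcount_read(&dmap->refcnt) > 1) {
                        dmap = NULL;
                        continue;
                }
                break;
        }
        if (dmap) {
                /* Remove dax mapping from inode interval tree now. */
                interval_tree_remove(&dmap->itn, &dax->tree);
                dax->nr--;
        }
        up_write(&dax->sem);

        return dmap;
}
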
1110 * 1. Take fi->i_mmap_sem to block dax faults.
1111 * 2. Take fi->dax->sem to protect interval tree and also to make sure
1132 down_write(&fi->dax->sem);
1134 up_write(&fi->dax->sem);
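
Lines 1110-1134 above document the reclaim locking order: dax faults on the inode are blocked first, then the interval tree lock is taken, so no new user can look the mapping up while it is torn down. A minimal sketch of that ordering; fi_i_mmap_sem stands in for the fuse inode's mmap rwsem and the actual teardown work is elided.

static void fuse_dax_reclaim_lock_order(struct rw_semaphore *fi_i_mmap_sem,
                                        struct fuse_inode_dax *dax)
{
        /* 1. Block dax page faults on this inode. */
        down_write(fi_i_mmap_sem);
        /* 2. Protect the interval tree while the range is removed. */
        down_write(&dax->sem);

        /* ... remove the mapping, unmap and invalidate its pages ... */

        up_write(&dax->sem);
        up_write(fi_i_mmap_sem);
}
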
1228 if (fc->dax) {
1229 fuse_free_dax_mem_ranges(&fc->dax->free_ranges);
1230 kfree(fc->dax);
1231 fc->dax = NULL;
1260 pr_debug("%s: dax mapped %ld pages. nr_ranges=%ld\n",
1309 fc->dax = fcd;
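
Lines 1228-1309 above tear down and attach the per-connection dax state. A minimal sketch of the teardown side, assuming the free list holds individually allocated ranges as in the earlier struct sketch; the real fuse_free_dax_mem_ranges() helper referenced at line 1229 is replaced here by an inline loop.

#include <linux/slab.h>

/* Free the range pool and detach the dax state from the connection. */
static void fuse_dax_conn_free_sketch(struct fuse_conn_dax **fcdp)
{
        struct fuse_conn_dax *fcd = *fcdp;
        struct fuse_dax_mapping *range, *tmp;

        if (!fcd)
                return;

        list_for_each_entry_safe(range, tmp, &fcd->free_ranges, list) {
                list_del(&range->list);
                kfree(range);
        }
        kfree(fcd);
        *fcdp = NULL;
}
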
1317 fi->dax = NULL;
1318 if (fc->dax) {
1319 fi->dax = kzalloc(sizeof(*fi->dax), GFP_KERNEL_ACCOUNT);
1320 if (!fi->dax)
1323 init_rwsem(&fi->dax->sem);
1324 fi->dax->tree = RB_ROOT_CACHED;
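
The per-inode initialization at lines 1317-1324 above allocates fi->dax only when the connection has dax enabled, then sets up the rwsem and an empty cached rbtree. A minimal sketch of that setup; the helper name and the pointer-to-pointer interface are illustrative.

static int fuse_dax_inode_alloc_sketch(struct fuse_inode_dax **fi_dax,
                                       struct fuse_conn_dax *fcd)
{
        *fi_dax = NULL;
        if (!fcd)
                return 0;

        *fi_dax = kzalloc(sizeof(**fi_dax), GFP_KERNEL_ACCOUNT);
        if (!*fi_dax)
                return -ENOMEM;

        init_rwsem(&(*fi_dax)->sem);
        (*fi_dax)->tree = RB_ROOT_CACHED;
        return 0;
}
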
1341 if (!fc->dax)
1350 if (fc->dax && (map_alignment > FUSE_DAX_SHIFT)) {
1351 pr_warn("FUSE: map_alignment %u incompatible with dax mem range size %u\n",
1360 struct fuse_conn_dax *fcd = fc->dax;
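
Lines 1341-1351 above reject a server-advertised map_alignment that exceeds the client's dax range granularity. A minimal sketch of that check; the FUSE_DAX_SHIFT value of 21 (2 MiB ranges) and the helper name are assumptions for illustration.

#include <linux/printk.h>

#define FUSE_DAX_SHIFT  21      /* assumption: 2 MiB dax ranges */

static bool fuse_dax_check_alignment_sketch(bool have_dax,
                                            unsigned int map_alignment)
{
        /*
         * map_alignment is the log2 alignment the server requires for
         * mapping requests; it must not exceed the client's range size.
         */
        if (have_dax && map_alignment > FUSE_DAX_SHIFT) {
                pr_warn("FUSE: map_alignment %u incompatible with dax mem range size %u\n",
                        map_alignment, 1u << FUSE_DAX_SHIFT);
                return false;
        }
        return true;
}
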