Lines matching refs:dax

3  * dax: direct host memory access
10 #include <linux/dax.h>
58 /* reference count when the mapping is used by dax iomap. */
62 /* Per-inode dax map */
86 /* Wait queue for a dax range to become free */
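The three comments above (lines 58, 62, 86) name the core bookkeeping objects. A minimal sketch of how they plausibly fit together, reconstructed from the fields used throughout this listing (any field or name not visible in a listed line is an assumption):

        #include <linux/dax.h>
        #include <linux/interval_tree.h>
        #include <linux/list.h>
        #include <linux/refcount.h>
        #include <linux/rwsem.h>
        #include <linux/wait.h>

        /* One range of the DAX window, hung off an inode's interval tree. */
        struct fuse_dax_mapping {
                struct interval_tree_node itn;  /* keyed by file page index */
                refcount_t refcnt;              /* line 58: held while dax iomap uses it */
                /* ... window offset, length, free/busy list linkage ... */
        };

        /* Per-inode dax map (line 62). */
        struct fuse_inode_dax {
                struct rw_semaphore sem;        /* guards tree and nr (line 222) */
                struct rb_root_cached tree;     /* of fuse_dax_mapping, via itn */
                unsigned long nr;               /* ranges currently in the tree */
        };

        /* Connection-wide window state; only fields visible in this listing are shown. */
        struct fuse_conn_dax {
                struct dax_device *dev;         /* line 579 */
                long nr_free_ranges;            /* line 1060 */
                struct list_head free_ranges;   /* line 1223 */
                wait_queue_head_t range_waitq;  /* line 86 */
        };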
187 struct fuse_conn_dax *fcd = fm->fc->dax;
222 /* Protected by fi->dax->sem */
223 interval_tree_insert(&dmap->itn, &fi->dax->tree);
224 fi->dax->nr++;
322 node = interval_tree_iter_first(&fi->dax->tree, start_idx,
329 interval_tree_remove(&dmap->itn, &fi->dax->tree);
338 WARN_ON(fi->dax->nr < num);
339 fi->dax->nr -= num;
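Lines 222-339 establish the tree's accounting invariant: inserts and removals happen under fi->dax->sem held for writing, and fi->dax->nr mirrors the number of nodes. A sketch of both halves, assuming the structures above (the helper names are hypothetical):

        /* Caller holds fd->sem for writing (line 222). */
        static void dmap_add(struct fuse_inode_dax *fd, struct fuse_dax_mapping *dmap)
        {
                interval_tree_insert(&dmap->itn, &fd->tree);    /* line 223 */
                fd->nr++;                                       /* line 224 */
        }

        /* Remove every node overlapping [start_idx, last_idx]; -1 means "to the end". */
        static void dmap_remove_range(struct fuse_inode_dax *fd,
                                      unsigned long start_idx, unsigned long last_idx)
        {
                struct interval_tree_node *node;
                unsigned long num = 0;

                while ((node = interval_tree_iter_first(&fd->tree, start_idx,
                                                        last_idx))) {   /* line 322 */
                        interval_tree_remove(node, &fd->tree);          /* line 329 */
                        num++;
                }
                WARN_ON(fd->nr < num);  /* line 338 */
                fd->nr -= num;          /* line 339 */
        }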
370 * this function does not take any locks like fi->dax->sem for traversing the interval tree.
384 inode_reclaim_dmap_range(fc->dax, inode, 0, -1);
385 WARN_ON(fi->dax->nr);
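Lines 370-385 are the evict-time teardown. Since evict_inode() runs after the last reference is gone, the tree can be emptied without taking fi->dax->sem, and the counter must come out at zero. A plausible reconstruction (the function name is an assumption; the two calls are the listed lines):

        void fuse_dax_inode_cleanup(struct inode *inode)
        {
                struct fuse_conn *fc = get_fuse_conn(inode);
                struct fuse_inode *fi = get_fuse_inode(inode);

                /* Inode is going away; lockless traversal is safe (line 370). */
                inode_reclaim_dmap_range(fc->dax, inode, 0, -1);        /* line 384 */
                WARN_ON(fi->dax->nr);                                   /* line 385 */
        }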
417 * use. This assumes fi->dax->sem is held either shared or exclusive.
437 struct fuse_conn_dax *fcd = fc->dax;
472 down_write(&fi->dax->sem);
477 node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);
482 up_write(&fi->dax->sem);
491 up_write(&fi->dax->sem);
495 up_write(&fi->dax->sem);
513 down_write(&fi->dax->sem);
514 node = interval_tree_iter_first(&fi->dax->tree, idx, idx);
520 * re-acquired the fi->dax->sem lock.
529 * Now we hold fi->dax->sem lock and that reference is not needed
554 up_write(&fi->dax->sem);
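Lines 472-554 are the write-path lock dance. The mapping is found under an exclusive fi->dax->sem; whenever the lock has to be dropped (e.g. to message the server), the reference from line 417 keeps the dmap from being reclaimed, and is dropped again once the lock is re-acquired (lines 520-529). Roughly, with error paths elided (sketch only; function and helper names are assumptions):

        static void fuse_dmap_upgrade_sketch(struct fuse_inode_dax *fd,
                                             unsigned long start_idx)
        {
                struct interval_tree_node *node;
                struct fuse_dax_mapping *dmap;

                down_write(&fd->sem);                           /* line 472 */
                node = interval_tree_iter_first(&fd->tree, start_idx, start_idx);
                if (!node) {
                        up_write(&fd->sem);                     /* line 482 */
                        return;
                }
                dmap = container_of(node, struct fuse_dax_mapping, itn);
                refcount_inc(&dmap->refcnt);    /* pin across the unlock */
                up_write(&fd->sem);

                /* ... ask the server to upgrade the mapping to writable ... */

                down_write(&fd->sem);           /* re-acquired (line 520) */
                refcount_dec(&dmap->refcnt);    /* pin no longer needed (line 529) */
                up_write(&fd->sem);             /* line 554 */
        }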
579 iomap->dax_dev = fc->dax->dev;
588 down_read(&fi->dax->sem);
589 node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);
594 * require exclusive fi->dax->sem lock as we don't want
599 * Before dropping fi->dax->sem lock, take reference
603 up_read(&fi->dax->sem);
610 up_read(&fi->dax->sem);
614 up_read(&fi->dax->sem);
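Lines 579-614 are the read-side lookup in the iomap path: a shared hold on fi->dax->sem suffices to find an existing mapping (line 594), but a reference must be taken before the lock is dropped (line 599) so reclaim cannot free the range mid-I/O; the iomap is then pointed at the connection's DAX device (line 579). A sketch, using the structures from earlier:

        static struct fuse_dax_mapping *
        fuse_dmap_lookup_sketch(struct fuse_inode_dax *fd, unsigned long start_idx)
        {
                struct interval_tree_node *node;
                struct fuse_dax_mapping *dmap = NULL;

                down_read(&fd->sem);                            /* line 588 */
                node = interval_tree_iter_first(&fd->tree, start_idx, start_idx);
                if (node) {
                        dmap = container_of(node, struct fuse_dax_mapping, itn);
                        /* Pin it before the lock goes away (line 599). */
                        refcount_inc(&dmap->refcnt);
                }
                up_read(&fd->sem);                              /* lines 603-614 */
                return dmap;
        }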
761 /* Do not use dax for file-extending writes, as the write and the on-disk i_size increase are not atomic otherwise. */
784 return dax_writeback_mapping_range(mapping, fc->dax->dev, wbc);
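Line 784 is the entire writeback story: DAX files have no page cache, so ->writepages reduces to flushing dirty CPU-cache lines over the mapped ranges, which the dax core does against the connection's DAX device. The enclosing function is almost certainly a thin wrapper of this shape:

        static int fuse_dax_writepages(struct address_space *mapping,
                                       struct writeback_control *wbc)
        {
                struct inode *inode = mapping->host;
                struct fuse_conn *fc = get_fuse_conn(inode);

                return dax_writeback_mapping_range(mapping, fc->dax->dev, wbc);
        }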
796 struct fuse_conn_dax *fcd = fc->dax;
807 * fuse dax memory range reclaim. While a range is being reclaimed, we do not want any read/write/mmap to make progress and try to populate page cache or access memory we are trying to free.
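The comment at line 807 is the fault-side half of the reclaim handshake: faults take mapping->invalidate_lock shared, reclaim takes it exclusive (see lines 1104-1128 below), so a fault can never map memory that is mid-teardown. In sketch form (handle_dax_fault is a hypothetical stand-in for the real dax fault entry point, whose signature varies across kernel versions):

        static vm_fault_t handle_dax_fault(struct vm_fault *vmf);      /* stand-in */

        static vm_fault_t fuse_dax_fault_sketch(struct vm_fault *vmf)
        {
                struct address_space *mapping = vmf->vma->vm_file->f_mapping;
                vm_fault_t ret;

                filemap_invalidate_lock_shared(mapping);        /* block range reclaim */
                ret = handle_dax_fault(vmf);
                filemap_invalidate_unlock_shared(mapping);

                return ret;
        }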
903 /* Remove dax mapping from inode interval tree now */
904 interval_tree_remove(&dmap->itn, &fi->dax->tree);
905 fi->dax->nr--;
920 * to hold fi->dax->sem lock either shared or exclusive.
928 for (node = interval_tree_iter_first(&fi->dax->tree, 0, -1); node;
959 down_read(&fi->dax->sem);
966 up_read(&fi->dax->sem);
982 down_write(&fi->dax->sem);
983 node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);
1015 up_write(&fi->dax->sem);
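Lines 903-1015 are reclaim proper: find a range in the inode's tree that nothing has pinned, detach it under the write lock, and fix up the count. Reconstructed below; treating refcnt == 1 as "only the tree holds it" is an assumption consistent with the pinning at line 417:

        /* Caller holds fd->sem for writing. Returns a detached dmap, or NULL. */
        static struct fuse_dax_mapping *
        fuse_dmap_reclaim_one_sketch(struct fuse_inode_dax *fd)
        {
                struct interval_tree_node *node;
                struct fuse_dax_mapping *dmap;

                for (node = interval_tree_iter_first(&fd->tree, 0, -1); node;
                     node = interval_tree_iter_next(node, 0, -1)) {     /* line 928 */
                        dmap = container_of(node, struct fuse_dax_mapping, itn);
                        if (refcount_read(&dmap->refcnt) > 1)   /* busy: skip it */
                                continue;
                        interval_tree_remove(&dmap->itn, &fd->tree);    /* line 904 */
                        fd->nr--;                                       /* line 905 */
                        return dmap;
                }
                return NULL;
        }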
1049 * mapping->invalidate_lock. So sleep only if fi->dax->nr=0
1055 * We are not holding fi->dax->sem. So it is possible
1060 if (!fi->dax->nr && !(fcd->nr_free_ranges > 0)) {
1078 /* Find the fuse dax mapping at this file offset in the inode. */
1079 node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx);
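Lines 1049-1079 handle the allocator running dry: if this inode has nothing left to reclaim inline (fi->dax->nr == 0) and the global free list is empty, the only option is to sleep on the wait queue from line 86 until reclaim frees a range. Roughly, inside a dmap-returning allocator loop (the killable/exclusive wait variant is an assumption):

        if (!fi->dax->nr && !(fcd->nr_free_ranges > 0)) {       /* line 1060 */
                /* Wait for another inode's reclaim to wake fcd->range_waitq. */
                if (wait_event_killable_exclusive(fcd->range_waitq,
                                                  fcd->nr_free_ranges > 0))
                        return ERR_PTR(-EINTR);
        }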
1104 * 1. Take mapping->invalidate_lock to block dax faults.
1105 * 2. Take fi->dax->sem to protect interval tree and also to make sure
1126 down_write(&fi->dax->sem);
1128 up_write(&fi->dax->sem);
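Lines 1104-1128 state the reclaim lock order explicitly, the mirror image of the fault path above: take mapping->invalidate_lock exclusive first so no new faults can race in, then fi->dax->sem to detach the range. As a fragment (dmap_detach is a hypothetical stand-in for the remove-and-decrement of lines 904-905):

        filemap_invalidate_lock(inode->i_mapping);      /* 1. block dax faults */
        down_write(&fi->dax->sem);                      /* 2. guard the interval tree */
        dmap_detach(fi->dax, dmap);
        up_write(&fi->dax->sem);
        filemap_invalidate_unlock(inode->i_mapping);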
1222 if (fc->dax) {
1223 fuse_free_dax_mem_ranges(&fc->dax->free_ranges);
1224 kfree(fc->dax);
1225 fc->dax = NULL;
1252 pr_debug("%s: dax mapped %ld pages. nr_ranges=%ld\n",
1304 fc->dax = fcd;
1312 fi->dax = NULL;
1313 if (fc->dax) {
1314 fi->dax = kzalloc(sizeof(*fi->dax), GFP_KERNEL_ACCOUNT);
1315 if (!fi->dax)
1318 init_rwsem(&fi->dax->sem);
1319 fi->dax->tree = RB_ROOT_CACHED;
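Lines 1312-1319 show the per-inode initialization nearly in full: the state is allocated only when the connection actually has a DAX window. Filled in, the allocator plausibly reads (the function signature is an assumption):

        bool fuse_dax_inode_alloc(struct super_block *sb, struct fuse_inode *fi)
        {
                struct fuse_conn *fc = get_fuse_conn_super(sb);

                fi->dax = NULL;
                if (fc->dax) {
                        fi->dax = kzalloc(sizeof(*fi->dax), GFP_KERNEL_ACCOUNT);
                        if (!fi->dax)
                                return false;

                        init_rwsem(&fi->dax->sem);
                        fi->dax->tree = RB_ROOT_CACHED;
                }
                return true;
        }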
1340 * fc->dax may be NULL in 'inode' mode when the filesystem device doesn't support DAX, in which case it silently falls back to 'never' mode.
1343 if (!fc->dax)
1373 if (fc->dax && (map_alignment > FUSE_DAX_SHIFT)) {
1374 pr_warn("FUSE: map_alignment %u incompatible with dax mem range size %u\n",
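Lines 1373-1374 guard FUSE_INIT negotiation: a server-requested map_alignment larger than the client's fixed range shift (FUSE_DAX_SHIFT) cannot be honored. Completed from the two lines above (the function name and return convention are assumptions):

        bool fuse_dax_check_alignment(struct fuse_conn *fc, unsigned int map_alignment)
        {
                if (fc->dax && (map_alignment > FUSE_DAX_SHIFT)) {
                        pr_warn("FUSE: map_alignment %u incompatible with dax mem range size %u\n",
                                map_alignment, 1 << FUSE_DAX_SHIFT);
                        return false;
                }
                return true;
        }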
1383 struct fuse_conn_dax *fcd = fc->dax;