Lines matching refs:fcd
40 /* Will connect in fcd->free_ranges to keep track of free memory */
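The reference at line 40 is a comment on a list member of the per-range mapping object; every other reference below touches fields of the per-connection DAX state. A sketch of the structure those references imply is shown here; only the field names come from this listing, their types and ordering are assumptions.

struct fuse_conn_dax {
        struct dax_device *dev;         /* backing DAX device (line 1297) */
        spinlock_t lock;                /* protects the counters and lists below */
        unsigned long nr_ranges;        /* total number of carved-out ranges */
        long nr_free_ranges;            /* entries currently on free_ranges */
        struct list_head free_ranges;   /* unassigned memory ranges */
        unsigned long nr_busy_ranges;   /* entries currently mapped to inodes */
        struct list_head busy_ranges;   /* reclaim candidates */
        struct delayed_work free_work;  /* background reclaim worker */
        wait_queue_head_t range_waitq;  /* waiters for a range to become free */
};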
106 alloc_dax_mapping_reclaim(struct fuse_conn_dax *fcd, struct inode *inode);
109 __kick_dmap_free_worker(struct fuse_conn_dax *fcd, unsigned long delay_ms)
114 free_threshold = max_t(unsigned long, fcd->nr_ranges * FUSE_DAX_RECLAIM_THRESHOLD / 100,
116 if (fcd->nr_free_ranges < free_threshold)
117 queue_delayed_work(system_long_wq, &fcd->free_work,
121 static void kick_dmap_free_worker(struct fuse_conn_dax *fcd,
124 spin_lock(&fcd->lock);
125 __kick_dmap_free_worker(fcd, delay_ms);
126 spin_unlock(&fcd->lock);
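Lines 109 through 126 are the reclaim trigger: a locked wrapper around a helper that compares nr_free_ranges against a percentage of nr_ranges and, if the pool is too small, queues the delayed worker on system_long_wq. A plausible reconstruction follows; the floor of one range and the millisecond-to-jiffies conversion are assumptions, since those continuation lines are not part of this listing.

/* Caller holds fcd->lock. Queue the reclaim worker once free ranges drop
 * below FUSE_DAX_RECLAIM_THRESHOLD percent of all ranges.
 */
static void
__kick_dmap_free_worker(struct fuse_conn_dax *fcd, unsigned long delay_ms)
{
        unsigned long free_threshold;

        free_threshold = max_t(unsigned long,
                               fcd->nr_ranges * FUSE_DAX_RECLAIM_THRESHOLD / 100,
                               1);
        if (fcd->nr_free_ranges < free_threshold)
                queue_delayed_work(system_long_wq, &fcd->free_work,
                                   msecs_to_jiffies(delay_ms));
}

static void kick_dmap_free_worker(struct fuse_conn_dax *fcd,
                                  unsigned long delay_ms)
{
        spin_lock(&fcd->lock);
        __kick_dmap_free_worker(fcd, delay_ms);
        spin_unlock(&fcd->lock);
}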
129 static struct fuse_dax_mapping *alloc_dax_mapping(struct fuse_conn_dax *fcd)
133 spin_lock(&fcd->lock);
134 dmap = list_first_entry_or_null(&fcd->free_ranges,
138 WARN_ON(fcd->nr_free_ranges <= 0);
139 fcd->nr_free_ranges--;
141 __kick_dmap_free_worker(fcd, 0);
142 spin_unlock(&fcd->lock);
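Lines 129 through 142 are the fast-path allocator: pop the first entry off fcd->free_ranges, drop the free count, and kick the reclaim worker with no delay before releasing the lock, so reclaim starts as soon as allocations begin eating into the pool. A reconstruction along those lines; the list_del_init() on the popped entry is assumed.

static struct fuse_dax_mapping *alloc_dax_mapping(struct fuse_conn_dax *fcd)
{
        struct fuse_dax_mapping *dmap;

        spin_lock(&fcd->lock);
        dmap = list_first_entry_or_null(&fcd->free_ranges,
                                        struct fuse_dax_mapping, list);
        if (dmap) {
                list_del_init(&dmap->list);     /* assumed: unlink from free_ranges */
                WARN_ON(fcd->nr_free_ranges <= 0);
                fcd->nr_free_ranges--;
        }
        /* Kick reclaim immediately if this dipped below the threshold. */
        __kick_dmap_free_worker(fcd, 0);
        spin_unlock(&fcd->lock);

        return dmap;
}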
147 /* This assumes fcd->lock is held */
148 static void __dmap_remove_busy_list(struct fuse_conn_dax *fcd,
152 WARN_ON(fcd->nr_busy_ranges == 0);
153 fcd->nr_busy_ranges--;
156 static void dmap_remove_busy_list(struct fuse_conn_dax *fcd,
159 spin_lock(&fcd->lock);
160 __dmap_remove_busy_list(fcd, dmap);
161 spin_unlock(&fcd->lock);
164 /* This assumes fcd->lock is held */
165 static void __dmap_add_to_free_pool(struct fuse_conn_dax *fcd,
168 list_add_tail(&dmap->list, &fcd->free_ranges);
169 fcd->nr_free_ranges++;
170 wake_up(&fcd->range_waitq);
173 static void dmap_add_to_free_pool(struct fuse_conn_dax *fcd,
177 spin_lock(&fcd->lock);
178 __dmap_add_to_free_pool(fcd, dmap);
179 spin_unlock(&fcd->lock);
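Lines 147 through 179 are two small helper pairs, each with a lock-held __ variant (the "This assumes fcd->lock is held" comments) and a wrapper that just takes fcd->lock around it: one removes a range from the busy list, the other returns a range to the free pool and wakes range_waitq, which is what releases the sleepers seen later at lines 803 and 1061-1062. A sketch of the lock-held variants; the list_del_init() is assumed.

/* Both helpers assume fcd->lock is held; dmap_remove_busy_list() and
 * dmap_add_to_free_pool() are thin locking wrappers around them.
 */
static void __dmap_remove_busy_list(struct fuse_conn_dax *fcd,
                                    struct fuse_dax_mapping *dmap)
{
        list_del_init(&dmap->busy_list);        /* assumed unlink */
        WARN_ON(fcd->nr_busy_ranges == 0);
        fcd->nr_busy_ranges--;
}

static void __dmap_add_to_free_pool(struct fuse_conn_dax *fcd,
                                    struct fuse_dax_mapping *dmap)
{
        list_add_tail(&dmap->list, &fcd->free_ranges);
        fcd->nr_free_ranges++;
        wake_up(&fcd->range_waitq);     /* unblock tasks waiting for a free range */
}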
187 struct fuse_conn_dax *fcd = fm->fc->dax;
194 WARN_ON(fcd->nr_free_ranges < 0);
225 spin_lock(&fcd->lock);
226 list_add_tail(&dmap->busy_list, &fcd->busy_ranges);
227 fcd->nr_busy_ranges++;
228 spin_unlock(&fcd->lock);
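Lines 187 through 228 sit in the path that sets up a new mapping for an inode (fcd comes from fm->fc->dax at line 187); once the mapping has been established, the range is put on the busy list under fcd->lock so the reclaim worker can find it later. A minimal sketch of just that listed step, with the surrounding function omitted:

        /* Mapping is set up; make the range visible to the reclaim worker. */
        spin_lock(&fcd->lock);
        list_add_tail(&dmap->busy_list, &fcd->busy_ranges);
        fcd->nr_busy_ranges++;
        spin_unlock(&fcd->lock);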
289 * fcd->lock held.
291 static void dmap_reinit_add_to_free_pool(struct fuse_conn_dax *fcd,
297 __dmap_remove_busy_list(fcd, dmap);
300 __dmap_add_to_free_pool(fcd, dmap);
309 static void inode_reclaim_dmap_range(struct fuse_conn_dax *fcd,
345 spin_lock(&fcd->lock);
348 dmap_reinit_add_to_free_pool(fcd, dmap);
350 spin_unlock(&fcd->lock);
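Lines 289 through 300 combine the two list helpers into dmap_reinit_add_to_free_pool(), which the comment at line 289 says must be called with fcd->lock held; lines 309 through 350 use it while reclaiming all of an inode's ranges, taking fcd->lock once around the loop rather than per range. A sketch of the lock-held helper; resetting the per-inode fields is an assumption based on the "reinit" name.

/* Called with fcd->lock held: detach the range from its inode bookkeeping
 * and hand it back to the free pool.
 */
static void dmap_reinit_add_to_free_pool(struct fuse_conn_dax *fcd,
                                         struct fuse_dax_mapping *dmap)
{
        __dmap_remove_busy_list(fcd, dmap);
        dmap->inode = NULL;             /* assumed per-inode fields */
        __dmap_add_to_free_pool(fcd, dmap);
}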
437 struct fuse_conn_dax *fcd = fc->dax;
455 alloc_dmap = alloc_dax_mapping(fcd);
459 alloc_dmap = alloc_dax_mapping_reclaim(fcd, inode);
481 dmap_add_to_free_pool(fcd, alloc_dmap);
490 dmap_add_to_free_pool(fcd, alloc_dmap);
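Lines 437 through 490 are in the read/write/fault mapping path (fcd comes from fc->dax at line 437). Line 455 uses the cheap allocator, line 459 the inline-reclaim variant, and lines 481 and 490 return the freshly allocated range to the free pool on the two bail-out paths. The control flow connecting them is not visible in this listing, so the sketch below marks every such condition as hypothetical.

        /* Which allocator is legal here depends on locking context that is
         * not visible in this listing (hypothetical condition below).
         */
        if (can_use_fast_path_only)                     /* hypothetical */
                alloc_dmap = alloc_dax_mapping(fcd);
        else
                alloc_dmap = alloc_dax_mapping_reclaim(fcd, inode);

        if (IS_ERR(alloc_dmap))
                return PTR_ERR(alloc_dmap);
        if (!alloc_dmap)
                return -EIO;            /* assumed: nothing free, cannot reclaim */

        /* ... attempt to install the range for this inode/offset ... */

        if (lost_race_or_setup_failed) {                /* hypothetical */
                /* The unused range goes straight back to the pool. */
                dmap_add_to_free_pool(fcd, alloc_dmap);
        }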
796 struct fuse_conn_dax *fcd = fc->dax;
802 if (retry && !(fcd->nr_free_ranges > 0))
803 wait_event(fcd->range_waitq, (fcd->nr_free_ranges > 0));
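Lines 796 through 803 are in the fault path: on a retry pass, if no range is free, the faulting task sleeps on range_waitq until __dmap_add_to_free_pool() wakes it. A minimal sketch of the listed check, assuming it sits at the top of a retry loop:

retry:
        /* On the second and later passes, sleep instead of spinning; the
         * wake_up() in __dmap_add_to_free_pool() ends the wait.
         */
        if (retry && !(fcd->nr_free_ranges > 0))
                wait_event(fcd->range_waitq, (fcd->nr_free_ranges > 0));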
946 inode_inline_reclaim_one_dmap(struct fuse_conn_dax *fcd, struct inode *inode,
1007 dmap_remove_busy_list(fcd, dmap);
1022 alloc_dax_mapping_reclaim(struct fuse_conn_dax *fcd, struct inode *inode)
1030 dmap = alloc_dax_mapping(fcd);
1034 dmap = inode_inline_reclaim_one_dmap(fcd, inode, &retry);
1060 if (!fi->dax->nr && !(fcd->nr_free_ranges > 0)) {
1061 if (wait_event_killable_exclusive(fcd->range_waitq,
1062 (fcd->nr_free_ranges > 0))) {
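Lines 946 through 1007 belong to inode_inline_reclaim_one_dmap(), which tears down one of the inode's own ranges and drops it from the busy list; lines 1022 through 1062 wrap it in the slow-path allocator: loop over the free pool, then inline reclaim, and only when the inode owns no ranges (fi->dax->nr at line 1060) and nothing is free, sleep killably and exclusively on range_waitq. A plausible reconstruction; the retry plumbing and the -EINTR return are assumptions.

static struct fuse_dax_mapping *
alloc_dax_mapping_reclaim(struct fuse_conn_dax *fcd, struct inode *inode)
{
        struct fuse_dax_mapping *dmap;
        struct fuse_inode *fi = get_fuse_inode(inode);

        while (1) {
                bool retry = false;

                dmap = alloc_dax_mapping(fcd);
                if (dmap)
                        return dmap;

                /* Steal one of this inode's own ranges if possible. */
                dmap = inode_inline_reclaim_one_dmap(fcd, inode, &retry);
                if (dmap)
                        return dmap;
                if (retry)
                        continue;

                /* Nothing of ours to reclaim and nothing free: wait for the
                 * background worker to return a range to the pool.
                 */
                if (!fi->dax->nr && !(fcd->nr_free_ranges > 0)) {
                        if (wait_event_killable_exclusive(fcd->range_waitq,
                                        (fcd->nr_free_ranges > 0)))
                                return ERR_PTR(-EINTR); /* assumed */
                }
        }
}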
1069 static int lookup_and_reclaim_dmap_locked(struct fuse_conn_dax *fcd,
1095 spin_lock(&fcd->lock);
1096 dmap_reinit_add_to_free_pool(fcd, dmap);
1097 spin_unlock(&fcd->lock);
1108 static int lookup_and_reclaim_dmap(struct fuse_conn_dax *fcd,
1127 ret = lookup_and_reclaim_dmap_locked(fcd, inode, start_idx);
1134 static int try_to_free_dmap_chunks(struct fuse_conn_dax *fcd,
1148 spin_lock(&fcd->lock);
1150 if (!fcd->nr_busy_ranges) {
1151 spin_unlock(&fcd->lock);
1155 list_for_each_entry_safe(pos, temp, &fcd->busy_ranges,
1175 list_move_tail(&dmap->busy_list, &fcd->busy_ranges);
1179 spin_unlock(&fcd->lock);
1183 ret = lookup_and_reclaim_dmap(fcd, inode, start_idx, end_idx);
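Lines 1069 through 1127 are the reclaim used by the background worker: lookup_and_reclaim_dmap() takes the inode-side locks and calls the _locked variant, which tears one range down and hands it back via dmap_reinit_add_to_free_pool() under fcd->lock. Lines 1134 through 1183 drive it: try_to_free_dmap_chunks() scans fcd->busy_ranges under the spinlock, pins a victim's inode, rotates the victim to the tail so a failed attempt is not re-picked immediately, then reclaims with the spinlock dropped. A sketch of that scan; the in-use filtering of candidates is omitted, and the itn.start index field is an assumption.

static int try_to_free_dmap_chunks(struct fuse_conn_dax *fcd,
                                   unsigned long nr_to_free)
{
        struct fuse_dax_mapping *dmap, *pos, *temp;
        unsigned long start_idx = 0, end_idx = 0;
        struct inode *inode = NULL;
        int ret, nr_freed = 0;

        while (nr_freed < nr_to_free) {
                dmap = NULL;
                spin_lock(&fcd->lock);
                if (!fcd->nr_busy_ranges) {
                        spin_unlock(&fcd->lock);
                        return 0;
                }
                list_for_each_entry_safe(pos, temp, &fcd->busy_ranges,
                                         busy_list) {
                        /* Pin the backing inode so it cannot go away. */
                        inode = igrab(pos->inode);
                        if (!inode)
                                continue;
                        dmap = pos;
                        /* Rotate to the tail; the next scan tries others first. */
                        list_move_tail(&dmap->busy_list, &fcd->busy_ranges);
                        start_idx = end_idx = dmap->itn.start;  /* assumed field */
                        break;
                }
                spin_unlock(&fcd->lock);
                if (!dmap)
                        return 0;

                /* Reclaim outside fcd->lock; this takes the inode-side locks. */
                ret = lookup_and_reclaim_dmap(fcd, inode, start_idx, end_idx);
                iput(inode);
                if (ret)
                        return ret;
                nr_freed++;
        }
        return 0;
}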
1195 struct fuse_conn_dax *fcd = container_of(work, struct fuse_conn_dax,
1197 ret = try_to_free_dmap_chunks(fcd, FUSE_DAX_RECLAIM_CHUNK);
1204 kick_dmap_free_worker(fcd, 1);
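Lines 1195 through 1204 are the delayed-work handler itself: recover the fuse_conn_dax with container_of(), free up to FUSE_DAX_RECLAIM_CHUNK ranges, then re-kick with a one-millisecond delay so the worker keeps running for as long as the pool stays below the threshold checked in __kick_dmap_free_worker(). A sketch:

static void fuse_dax_free_mem_worker(struct work_struct *work)
{
        struct fuse_conn_dax *fcd = container_of(work, struct fuse_conn_dax,
                                                 free_work.work);
        int ret;

        ret = try_to_free_dmap_chunks(fcd, FUSE_DAX_RECLAIM_CHUNK);
        if (ret)
                pr_debug("fuse: try_to_free_dmap_chunks() failed, err=%d\n", ret);

        /* Requeue ourselves if free ranges are still below the threshold. */
        kick_dmap_free_worker(fcd, 1);
}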
1229 static int fuse_dax_mem_range_init(struct fuse_conn_dax *fcd)
1237 init_waitqueue_head(&fcd->range_waitq);
1238 INIT_LIST_HEAD(&fcd->free_ranges);
1239 INIT_LIST_HEAD(&fcd->busy_ranges);
1240 INIT_DELAYED_WORK(&fcd->free_work, fuse_dax_free_mem_worker);
1243 nr_pages = dax_direct_access(fcd->dev, 0, PHYS_PFN(dax_size),
1269 list_add_tail(&range->list, &fcd->free_ranges);
1272 fcd->nr_free_ranges = nr_ranges;
1273 fcd->nr_ranges = nr_ranges;
1277 fuse_free_dax_mem_ranges(&fcd->free_ranges);
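Lines 1229 through 1277 initialize the pool at setup time: prepare the waitqueue, the two lists and the delayed work, size the DAX window with dax_direct_access(), carve it into fixed-size ranges, and put every range on free_ranges with both counters set to the total; on allocation failure everything already queued is freed via fuse_free_dax_mem_ranges() (line 1277). A sketch under the assumptions that each range spans FUSE_DAX_SZ bytes, that FUSE_DAX_PAGES_PER_RANGE is defined alongside it, and that dax_direct_access() takes the five-argument form suggested by line 1243.

static int fuse_dax_mem_range_init(struct fuse_conn_dax *fcd)
{
        struct fuse_dax_mapping *range;
        size_t dax_size = -1;           /* assumed: probe the whole window */
        long nr_pages, nr_ranges;
        unsigned long i;
        void *kaddr;
        pfn_t pfn;
        int id;

        init_waitqueue_head(&fcd->range_waitq);
        INIT_LIST_HEAD(&fcd->free_ranges);
        INIT_LIST_HEAD(&fcd->busy_ranges);
        INIT_DELAYED_WORK(&fcd->free_work, fuse_dax_free_mem_worker);

        /* Ask the DAX device how much is directly accessible. */
        id = dax_read_lock();
        nr_pages = dax_direct_access(fcd->dev, 0, PHYS_PFN(dax_size),
                                     &kaddr, &pfn);
        dax_read_unlock(id);
        if (nr_pages < 0)
                return nr_pages;

        nr_ranges = nr_pages / FUSE_DAX_PAGES_PER_RANGE;        /* assumed */
        for (i = 0; i < nr_ranges; i++) {
                range = kzalloc(sizeof(*range), GFP_KERNEL);
                if (!range)
                        goto out_err;
                range->window_offset = i * FUSE_DAX_SZ;         /* assumed fields */
                range->length = FUSE_DAX_SZ;
                INIT_LIST_HEAD(&range->busy_list);
                list_add_tail(&range->list, &fcd->free_ranges);
        }

        fcd->nr_free_ranges = nr_ranges;
        fcd->nr_ranges = nr_ranges;
        return 0;
out_err:
        fuse_free_dax_mem_ranges(&fcd->free_ranges);
        return -ENOMEM;
}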
1284 struct fuse_conn_dax *fcd;
1292 fcd = kzalloc(sizeof(*fcd), GFP_KERNEL);
1293 if (!fcd)
1296 spin_lock_init(&fcd->lock);
1297 fcd->dev = dax_dev;
1298 err = fuse_dax_mem_range_init(fcd);
1300 kfree(fcd);
1304 fc->dax = fcd;
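Lines 1284 through 1304 allocate and wire up the per-connection state: kzalloc, spin_lock_init, record the DAX device, build the range pool, and only then publish the result in fc->dax. The enclosing function name and its error handling are assumptions.

/* Sketch of the setup path around lines 1284-1304; fuse_dax_conn_alloc()
 * as a name is an assumption.
 */
int fuse_dax_conn_alloc(struct fuse_conn *fc, struct dax_device *dax_dev)
{
        struct fuse_conn_dax *fcd;
        int err;

        fcd = kzalloc(sizeof(*fcd), GFP_KERNEL);
        if (!fcd)
                return -ENOMEM;

        spin_lock_init(&fcd->lock);
        fcd->dev = dax_dev;

        err = fuse_dax_mem_range_init(fcd);
        if (err) {
                kfree(fcd);
                return err;
        }

        fc->dax = fcd;
        return 0;
}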
1383 struct fuse_conn_dax *fcd = fc->dax;
1385 if (fcd)
1386 cancel_delayed_work_sync(&fcd->free_work);
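Finally, lines 1383 through 1386 are the teardown side: before the connection's DAX state is released, the delayed reclaim work is cancelled synchronously so the worker cannot run against freed memory. A minimal sketch; the enclosing function name is an assumption.

void fuse_dax_cancel_work(struct fuse_conn *fc)
{
        struct fuse_conn_dax *fcd = fc->dax;

        if (fcd)
                cancel_delayed_work_sync(&fcd->free_work);
}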