Lines Matching refs:fcd

39 	/* Will connect in fcd->free_ranges to keep track of free memory */
105 alloc_dax_mapping_reclaim(struct fuse_conn_dax *fcd, struct inode *inode);
108 __kick_dmap_free_worker(struct fuse_conn_dax *fcd, unsigned long delay_ms)
113 free_threshold = max_t(unsigned long, fcd->nr_ranges * FUSE_DAX_RECLAIM_THRESHOLD / 100,
115 if (fcd->nr_free_ranges < free_threshold)
116 queue_delayed_work(system_long_wq, &fcd->free_work,
120 static void kick_dmap_free_worker(struct fuse_conn_dax *fcd,
123 spin_lock(&fcd->lock);
124 __kick_dmap_free_worker(fcd, delay_ms);
125 spin_unlock(&fcd->lock);
128 static struct fuse_dax_mapping *alloc_dax_mapping(struct fuse_conn_dax *fcd)
132 spin_lock(&fcd->lock);
133 dmap = list_first_entry_or_null(&fcd->free_ranges,
137 WARN_ON(fcd->nr_free_ranges <= 0);
138 fcd->nr_free_ranges--;
140 spin_unlock(&fcd->lock);
142 kick_dmap_free_worker(fcd, 0);
146 /* This assumes fcd->lock is held */
147 static void __dmap_remove_busy_list(struct fuse_conn_dax *fcd,
151 WARN_ON(fcd->nr_busy_ranges == 0);
152 fcd->nr_busy_ranges--;
155 static void dmap_remove_busy_list(struct fuse_conn_dax *fcd,
158 spin_lock(&fcd->lock);
159 __dmap_remove_busy_list(fcd, dmap);
160 spin_unlock(&fcd->lock);
163 /* This assumes fcd->lock is held */
164 static void __dmap_add_to_free_pool(struct fuse_conn_dax *fcd,
167 list_add_tail(&dmap->list, &fcd->free_ranges);
168 fcd->nr_free_ranges++;
169 wake_up(&fcd->range_waitq);
172 static void dmap_add_to_free_pool(struct fuse_conn_dax *fcd,
176 spin_lock(&fcd->lock);
177 __dmap_add_to_free_pool(fcd, dmap);
178 spin_unlock(&fcd->lock);
186 struct fuse_conn_dax *fcd = fm->fc->dax;
193 WARN_ON(fcd->nr_free_ranges < 0);
224 spin_lock(&fcd->lock);
225 list_add_tail(&dmap->busy_list, &fcd->busy_ranges);
226 fcd->nr_busy_ranges++;
227 spin_unlock(&fcd->lock);
288 * fcd->lock held.
290 static void dmap_reinit_add_to_free_pool(struct fuse_conn_dax *fcd,
296 __dmap_remove_busy_list(fcd, dmap);
299 __dmap_add_to_free_pool(fcd, dmap);
308 static void inode_reclaim_dmap_range(struct fuse_conn_dax *fcd,
344 spin_lock(&fcd->lock);
347 dmap_reinit_add_to_free_pool(fcd, dmap);
349 spin_unlock(&fcd->lock);
436 struct fuse_conn_dax *fcd = fc->dax;
454 alloc_dmap = alloc_dax_mapping(fcd);
458 alloc_dmap = alloc_dax_mapping_reclaim(fcd, inode);
480 dmap_add_to_free_pool(fcd, alloc_dmap);
489 dmap_add_to_free_pool(fcd, alloc_dmap);
800 struct fuse_conn_dax *fcd = fc->dax;
806 if (retry && !(fcd->nr_free_ranges > 0))
807 wait_event(fcd->range_waitq, (fcd->nr_free_ranges > 0));
952 inode_inline_reclaim_one_dmap(struct fuse_conn_dax *fcd, struct inode *inode,
1013 dmap_remove_busy_list(fcd, dmap);
1028 alloc_dax_mapping_reclaim(struct fuse_conn_dax *fcd, struct inode *inode)
1036 dmap = alloc_dax_mapping(fcd);
1040 dmap = inode_inline_reclaim_one_dmap(fcd, inode, &retry);
1066 if (!fi->dax->nr && !(fcd->nr_free_ranges > 0)) {
1067 if (wait_event_killable_exclusive(fcd->range_waitq,
1068 (fcd->nr_free_ranges > 0))) {
1075 static int lookup_and_reclaim_dmap_locked(struct fuse_conn_dax *fcd,
1101 spin_lock(&fcd->lock);
1102 dmap_reinit_add_to_free_pool(fcd, dmap);
1103 spin_unlock(&fcd->lock);
1114 static int lookup_and_reclaim_dmap(struct fuse_conn_dax *fcd,
1133 ret = lookup_and_reclaim_dmap_locked(fcd, inode, start_idx);
1140 static int try_to_free_dmap_chunks(struct fuse_conn_dax *fcd,
1154 spin_lock(&fcd->lock);
1156 if (!fcd->nr_busy_ranges) {
1157 spin_unlock(&fcd->lock);
1161 list_for_each_entry_safe(pos, temp, &fcd->busy_ranges,
1181 list_move_tail(&dmap->busy_list, &fcd->busy_ranges);
1185 spin_unlock(&fcd->lock);
1189 ret = lookup_and_reclaim_dmap(fcd, inode, start_idx, end_idx);
1201 struct fuse_conn_dax *fcd = container_of(work, struct fuse_conn_dax,
1203 ret = try_to_free_dmap_chunks(fcd, FUSE_DAX_RECLAIM_CHUNK);
1210 kick_dmap_free_worker(fcd, 1);
1235 static int fuse_dax_mem_range_init(struct fuse_conn_dax *fcd)
1245 init_waitqueue_head(&fcd->range_waitq);
1246 INIT_LIST_HEAD(&fcd->free_ranges);
1247 INIT_LIST_HEAD(&fcd->busy_ranges);
1248 INIT_DELAYED_WORK(&fcd->free_work, fuse_dax_free_mem_worker);
1251 nr_pages = dax_direct_access(fcd->dev, 0, PHYS_PFN(dax_size), &kaddr,
1277 list_add_tail(&range->list, &fcd->free_ranges);
1280 fcd->nr_free_ranges = nr_ranges;
1281 fcd->nr_ranges = nr_ranges;
1285 fuse_free_dax_mem_ranges(&fcd->free_ranges);
1291 struct fuse_conn_dax *fcd;
1297 fcd = kzalloc(sizeof(*fcd), GFP_KERNEL);
1298 if (!fcd)
1301 spin_lock_init(&fcd->lock);
1302 fcd->dev = dax_dev;
1303 err = fuse_dax_mem_range_init(fcd);
1305 kfree(fcd);
1309 fc->dax = fcd;
1360 struct fuse_conn_dax *fcd = fc->dax;
1362 if (fcd)
1363 cancel_delayed_work_sync(&fcd->free_work);
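
Every reference above goes through fields of a single struct fuse_conn_dax instance. The sketch below is a minimal reconstruction of that structure inferred purely from the lines listed here; the field names come from the listing, while the types, ordering, and comments are assumptions rather than the authoritative kernel definition.

```c
/*
 * Sketch of struct fuse_conn_dax as implied by the references above.
 * Field names are taken from the listing; types and comments are
 * assumptions, not the authoritative definition.
 */
struct fuse_conn_dax {
	spinlock_t lock;		/* protects the lists and counters below */

	struct dax_device *dev;		/* backing DAX device (line 1302) */

	/* Free memory ranges, consumed by alloc_dax_mapping() */
	struct list_head free_ranges;	/* line 1246 */
	long nr_free_ranges;		/* line 1280 */
	unsigned long nr_ranges;	/* total number of ranges, line 1281 */

	/* Ranges currently mapped into some inode */
	struct list_head busy_ranges;	/* line 1247 */
	unsigned long nr_busy_ranges;	/* lines 151-152 */

	/* Waiters blocked until a range becomes free (lines 807, 1067) */
	wait_queue_head_t range_waitq;

	/* Background reclaim, kicked when free ranges drop below threshold */
	struct delayed_work free_work;	/* lines 116, 1248 */
};
```

The lock/list pattern visible around lines 146-178 (a `__`-prefixed helper that assumes fcd->lock is held, wrapped by a locking variant) is what keeps nr_free_ranges and nr_busy_ranges consistent with their respective lists.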