Lines Matching refs:nullb

50  * nullb_page is a page in memory for nullb devices.
246 static void null_del_dev(struct nullb *nullb);
248 static struct nullb *null_find_dev_by_name(const char *name);
353 if (!dev->nullb)
363 * Make sure that null_init_hctx() does not access nullb->queues[] past
378 set = dev->nullb->tag_set;
462 null_del_dev(dev->nullb);
643 null_del_dev(dev->nullb);
683 .ci_namebuf = "nullb",
689 static inline int null_cache_active(struct nullb *nullb)
691 return test_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
896 static void null_free_sector(struct nullb *nullb, sector_t sector,
904 root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
917 nullb->dev->curr_cache -= PAGE_SIZE;
922 static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
927 root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
934 nullb->dev->curr_cache += PAGE_SIZE;
968 static struct nullb_page *__null_lookup_page(struct nullb *nullb,
979 root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
989 static struct nullb_page *null_lookup_page(struct nullb *nullb,
995 page = __null_lookup_page(nullb, sector, for_write, true);
998 return __null_lookup_page(nullb, sector, for_write, false);
1001 static struct nullb_page *null_insert_page(struct nullb *nullb,
1003 __releases(&nullb->lock)
1004 __acquires(&nullb->lock)
1009 t_page = null_lookup_page(nullb, sector, true, ignore_cache);
1013 spin_unlock_irq(&nullb->lock);
1022 spin_lock_irq(&nullb->lock);
1025 t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache);
1032 spin_lock_irq(&nullb->lock);
1033 return null_lookup_page(nullb, sector, true, ignore_cache);
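
The null_insert_page() lines above (1001-1033) show a drop-and-revalidate locking pattern: the __releases()/__acquires() annotations record that nullb->lock is released around the page allocation and retaken afterwards, and on the failure path the lookup is simply redone because another context may have inserted the same index while the lock was dropped. A minimal userspace sketch of that pattern, with a pthread mutex and a fixed pointer table as hypothetical stand-ins for nullb->lock and the radix tree:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define TABLE_SIZE 64                          /* stand-in for the radix tree */

struct entry { unsigned long idx; };

static struct entry *table[TABLE_SIZE];
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static struct entry *lookup(unsigned long idx)
{
	for (int i = 0; i < TABLE_SIZE; i++)
		if (table[i] && table[i]->idx == idx)
			return table[i];
	return NULL;
}

/* Called with `lock` held; drops and retakes it, like null_insert_page(). */
static struct entry *insert_page(unsigned long idx)
{
	struct entry *e = lookup(idx);
	if (e)
		return e;

	pthread_mutex_unlock(&lock);           /* drop the lock around the allocation */
	e = malloc(sizeof(*e));
	pthread_mutex_lock(&lock);             /* retake it before touching the table */

	if (!e || lookup(idx)) {               /* allocation failed, or another context won the race */
		free(e);
		return lookup(idx);
	}

	e->idx = idx;
	for (int i = 0; i < TABLE_SIZE; i++) {
		if (!table[i]) {
			table[i] = e;
			return e;
		}
	}
	free(e);                               /* table full */
	return NULL;
}

int main(void)
{
	pthread_mutex_lock(&lock);
	struct entry *e = insert_page(42);
	pthread_mutex_unlock(&lock);
	printf("got entry for idx %lu\n", e ? e->idx : 0UL);
	return 0;
}
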
1036 static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
1046 t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);
1052 ret = radix_tree_delete_item(&nullb->dev->data,
1066 i += (nullb->dev->blocksize >> SECTOR_SHIFT)) {
1070 nullb->dev->blocksize);
1078 ret = radix_tree_delete_item(&nullb->dev->cache, idx, c_page);
1080 nullb->dev->curr_cache -= PAGE_SIZE;
1085 static int null_make_cache_space(struct nullb *nullb, unsigned long n)
1092 if ((nullb->dev->cache_size * 1024 * 1024) >
1093 nullb->dev->curr_cache + n || nullb->dev->curr_cache == 0)
1096 nr_pages = radix_tree_gang_lookup(&nullb->dev->cache,
1097 (void **)c_pages, nullb->cache_flush_pos, FREE_BATCH);
1103 nullb->cache_flush_pos = c_pages[i]->page->index;
1118 err = null_flush_cache_page(nullb, c_pages[i]);
1127 nullb->cache_flush_pos = 0;
1130 spin_unlock_irq(&nullb->lock);
1131 spin_lock_irq(&nullb->lock);
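
The null_make_cache_space() lines above (1085-1131) gate cached writes on a byte budget: there is room as long as the configured cache_size (in MB) still exceeds curr_cache plus the incoming bytes, or nothing is cached yet; otherwise cached pages are gathered in FREE_BATCH-sized batches starting at cache_flush_pos and written back. A tiny sketch of just the threshold check on lines 1092-1093, with the MB-to-bytes conversion spelled out (the function and parameter names are illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>

/*
 * Mirror of the check on lines 1092-1093: room if the configured cache size
 * (in MB) still exceeds current usage plus the incoming bytes, or if nothing
 * is cached yet.
 */
static bool cache_has_room(unsigned long cache_size_mb,
			   unsigned long curr_cache_bytes,
			   unsigned long incoming_bytes)
{
	return cache_size_mb * 1024 * 1024 > curr_cache_bytes + incoming_bytes ||
	       curr_cache_bytes == 0;
}

int main(void)
{
	/* 1 MB cache, 1020 KB already used: one more 4 KB page fills it exactly,
	 * so the eviction path would run. */
	printf("%s\n", cache_has_room(1, 1020 * 1024, 4096) ? "room" : "must evict");
	return 0;
}
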
1138 static int copy_to_nullb(struct nullb *nullb, struct page *source,
1146 temp = min_t(size_t, nullb->dev->blocksize, n - count);
1148 if (null_cache_active(nullb) && !is_fua)
1149 null_make_cache_space(nullb, PAGE_SIZE);
1152 t_page = null_insert_page(nullb, sector,
1153 !null_cache_active(nullb) || is_fua);
1162 null_free_sector(nullb, sector, true);
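
The copy_to_nullb() lines above (1138-1162) decide, per blocksize chunk, where a write lands: with the cache active and no FUA, cache space is reserved and the chunk goes into the cache tree; otherwise null_insert_page() is told to ignore the cache, and line 1162 drops any cached copy of the sector so the data tree holds the only version. A condensed sketch of that decision, assuming (as the shown lines suggest) that the cached copy is dropped on FUA writes; plain booleans stand in for the driver state:

#include <stdbool.h>
#include <stdio.h>

enum write_target { WRITE_TO_CACHE, WRITE_TO_DATA };

/* Per-chunk decision mirrored from copy_to_nullb(): cache unless FUA (or a
 * disabled cache) forces the chunk into the data tree. */
static enum write_target choose_target(bool cache_active, bool is_fua,
				       bool *drop_cached_sector)
{
	/* FUA drops any cached copy so only the data tree holds the sector */
	*drop_cached_sector = is_fua;
	if (cache_active && !is_fua)
		return WRITE_TO_CACHE;
	return WRITE_TO_DATA;
}

int main(void)
{
	bool drop;
	enum write_target t = choose_target(true, true, &drop);
	printf("FUA write with cache on: target %s, drop cached copy: %s\n",
	       t == WRITE_TO_DATA ? "data" : "cache", drop ? "yes" : "no");
	return 0;
}
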
1170 static int copy_from_nullb(struct nullb *nullb, struct page *dest,
1178 temp = min_t(size_t, nullb->dev->blocksize, n - count);
1181 t_page = null_lookup_page(nullb, sector, false,
1182 !null_cache_active(nullb));
1196 static void nullb_fill_pattern(struct nullb *nullb, struct page *page,
1205 struct nullb *nullb = dev->nullb;
1209 spin_lock_irq(&nullb->lock);
1212 null_free_sector(nullb, sector, false);
1213 if (null_cache_active(nullb))
1214 null_free_sector(nullb, sector, true);
1218 spin_unlock_irq(&nullb->lock);
1223 static int null_handle_flush(struct nullb *nullb)
1227 if (!null_cache_active(nullb))
1230 spin_lock_irq(&nullb->lock);
1232 err = null_make_cache_space(nullb,
1233 nullb->dev->cache_size * 1024 * 1024);
1234 if (err || nullb->dev->curr_cache == 0)
1238 WARN_ON(!radix_tree_empty(&nullb->dev->cache));
1239 spin_unlock_irq(&nullb->lock);
1243 static int null_transfer(struct nullb *nullb, struct page *page,
1247 struct nullb_device *dev = nullb->dev;
1253 valid_len = null_zone_valid_read_len(nullb,
1257 err = copy_from_nullb(nullb, page, off,
1264 nullb_fill_pattern(nullb, page, len, off);
1268 err = copy_to_nullb(nullb, page, off, sector, len, is_fua);
1277 struct nullb *nullb = cmd->nq->dev->nullb;
1284 spin_lock_irq(&nullb->lock);
1287 err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
1291 spin_unlock_irq(&nullb->lock);
1296 spin_unlock_irq(&nullb->lock);
1304 struct nullb *nullb = cmd->nq->dev->nullb;
1311 spin_lock_irq(&nullb->lock);
1314 err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
1318 spin_unlock_irq(&nullb->lock);
1323 spin_unlock_irq(&nullb->lock);
1327 static void null_stop_queue(struct nullb *nullb)
1329 struct request_queue *q = nullb->q;
1331 if (nullb->dev->queue_mode == NULL_Q_MQ)
1335 static void null_restart_queue_async(struct nullb *nullb)
1337 struct request_queue *q = nullb->q;
1339 if (nullb->dev->queue_mode == NULL_Q_MQ)
1346 struct nullb *nullb = dev->nullb;
1350 if (!hrtimer_active(&nullb->bw_timer))
1351 hrtimer_restart(&nullb->bw_timer);
1353 if (atomic_long_sub_return(blk_rq_bytes(rq), &nullb->cur_bytes) < 0) {
1354 null_stop_queue(nullb);
1356 if (atomic_long_read(&nullb->cur_bytes) > 0)
1357 null_restart_queue_async(nullb);
1471 struct nullb *nullb = dev->nullb;
1481 cmd->error = errno_to_blk_status(null_handle_flush(nullb));
1501 struct nullb *nullb = container_of(timer, struct nullb, bw_timer);
1503 unsigned int mbps = nullb->dev->mbps;
1505 if (atomic_long_read(&nullb->cur_bytes) == mb_per_tick(mbps))
1508 atomic_long_set(&nullb->cur_bytes, mb_per_tick(mbps));
1509 null_restart_queue_async(nullb);
1511 hrtimer_forward_now(&nullb->bw_timer, timer_interval);
1516 static void nullb_setup_bwtimer(struct nullb *nullb)
1520 hrtimer_init(&nullb->bw_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1521 nullb->bw_timer.function = nullb_bwtimer_fn;
1522 atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps));
1523 hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL);
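
The nullb_bwtimer_fn() and nullb_setup_bwtimer() lines above (1501-1523), together with the consumer at 1346-1357, implement the mbps throttle as a refilled byte budget: each request subtracts its size from cur_bytes, the queue is stopped once the budget goes negative, and the hrtimer periodically resets the budget to mb_per_tick(mbps) and restarts the queue. A userspace token-bucket sketch of the same idea; the tick rate and refill arithmetic here are illustrative assumptions, not the driver's constants:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define TICKS_PER_SEC 20L                   /* illustrative tick rate, not the driver's */

static atomic_long cur_bytes;

/* Budget per tick: the configured MB/s spread across the ticks in a second. */
static long bytes_per_tick(long mbps)
{
	return (1L << 20) * mbps / TICKS_PER_SEC;
}

/* Called per request: true if it may run, false if the queue should stop
 * until the next refill (cf. lines 1353-1357). */
static bool charge_request(long req_bytes)
{
	return atomic_fetch_sub(&cur_bytes, req_bytes) - req_bytes >= 0;
}

/* Timer callback: reset the budget; the driver also restarts the queue here. */
static void refill_tick(long mbps)
{
	atomic_store(&cur_bytes, bytes_per_tick(mbps));
}

int main(void)
{
	long mbps = 1;                      /* throttle to roughly 1 MB/s */
	long granted = 0;

	refill_tick(mbps);
	while (charge_request(4096))        /* issue 4 KB requests until throttled */
		granted++;
	printf("granted %ld requests of 4096 bytes this tick\n", granted);
	return 0;
}
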
1526 static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
1530 if (nullb->nr_queues != 1)
1531 index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);
1533 return &nullb->queues[index];
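
The nullb_to_queue() line at 1531 maps the submitting CPU to a queue by dividing the CPU id by ceil(nr_cpu_ids / nr_queues), so consecutive CPUs share a queue and the index stays below nr_queues. A worked example of that arithmetic (the CPU and queue counts are chosen for illustration):

#include <stdio.h>

/* Same arithmetic as line 1531: blocks of ceil(nr_cpu_ids / nr_queues) CPUs per queue. */
static unsigned int cpu_to_queue(unsigned int cpu, unsigned int nr_cpu_ids,
				 unsigned int nr_queues)
{
	if (nr_queues == 1)
		return 0;
	return cpu / ((nr_cpu_ids + nr_queues - 1) / nr_queues);
}

int main(void)
{
	unsigned int nr_cpu_ids = 8, nr_queues = 3;

	/* ceil(8 / 3) = 3, so CPUs 0-2 -> queue 0, 3-5 -> queue 1, 6-7 -> queue 2 */
	for (unsigned int cpu = 0; cpu < nr_cpu_ids; cpu++)
		printf("cpu %u -> queue %u\n", cpu,
		       cpu_to_queue(cpu, nr_cpu_ids, nr_queues));
	return 0;
}
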
1540 struct nullb *nullb = bio->bi_bdev->bd_disk->private_data;
1541 struct nullb_queue *nq = nullb_to_queue(nullb);
1590 struct nullb *nullb = set->driver_data;
1595 if (nullb) {
1596 struct nullb_device *dev = nullb->dev;
1759 static void cleanup_queues(struct nullb *nullb)
1763 for (i = 0; i < nullb->nr_queues; i++)
1764 cleanup_queue(&nullb->queues[i]);
1766 kfree(nullb->queues);
1772 struct nullb *nullb = nq->dev->nullb;
1774 nullb->nr_queues--;
1777 static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
1780 nq->queue_depth = nullb->queue_depth;
1781 nq->dev = nullb->dev;
1789 struct nullb *nullb = hctx->queue->queuedata;
1792 if (should_init_hctx_fail(nullb->dev))
1795 nq = &nullb->queues[hctx_idx];
1797 null_init_queue(nullb, nq);
1798 nullb->nr_queues++;
1813 static void null_del_dev(struct nullb *nullb)
1817 if (!nullb)
1820 dev = nullb->dev;
1822 ida_simple_remove(&nullb_indexes, nullb->index);
1824 list_del_init(&nullb->list);
1826 del_gendisk(nullb->disk);
1828 if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) {
1829 hrtimer_cancel(&nullb->bw_timer);
1830 atomic_long_set(&nullb->cur_bytes, LONG_MAX);
1831 null_restart_queue_async(nullb);
1834 put_disk(nullb->disk);
1836 nullb->tag_set == &nullb->__tag_set)
1837 blk_mq_free_tag_set(nullb->tag_set);
1838 cleanup_queues(nullb);
1839 if (null_cache_active(nullb))
1840 null_free_device_storage(nullb->dev, true);
1841 kfree(nullb);
1842 dev->nullb = NULL;
1845 static void null_config_discard(struct nullb *nullb)
1847 if (nullb->dev->discard == false)
1850 if (!nullb->dev->memory_backed) {
1851 nullb->dev->discard = false;
1856 if (nullb->dev->zoned) {
1857 nullb->dev->discard = false;
1862 nullb->q->limits.discard_granularity = nullb->dev->blocksize;
1863 blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
1900 static int setup_queues(struct nullb *nullb)
1907 nullb->queues = kcalloc(nqueues, sizeof(struct nullb_queue),
1909 if (!nullb->queues)
1912 nullb->queue_depth = nullb->dev->hw_queue_depth;
1916 static int init_driver_queues(struct nullb *nullb)
1921 for (i = 0; i < nullb->dev->submit_queues; i++) {
1922 nq = &nullb->queues[i];
1924 null_init_queue(nullb, nq);
1929 nullb->nr_queues++;
1934 static int null_gendisk_register(struct nullb *nullb)
1936 sector_t size = ((sector_t)nullb->dev->size * SZ_1M) >> SECTOR_SHIFT;
1937 struct gendisk *disk = nullb->disk;
1942 disk->first_minor = nullb->index;
1944 if (queue_is_mq(nullb->q))
1948 disk->private_data = nullb;
1949 strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
1951 if (nullb->dev->zoned) {
1952 int ret = null_register_zoned_dev(nullb);
1961 static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
1968 if (nullb) {
1969 hw_queues = nullb->dev->submit_queues;
1970 poll_queues = nullb->dev->poll_queues;
1971 queue_depth = nullb->dev->hw_queue_depth;
1972 numa_node = nullb->dev->home_node;
1973 if (nullb->dev->no_sched)
1975 if (nullb->dev->shared_tag_bitmap)
1977 if (nullb->dev->blocking)
1995 set->driver_data = nullb;
2085 struct nullb *nullb;
2092 nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node);
2093 if (!nullb) {
2097 nullb->dev = dev;
2098 dev->nullb = nullb;
2100 spin_lock_init(&nullb->lock);
2102 rv = setup_queues(nullb);
2108 nullb->tag_set = &tag_set;
2111 nullb->tag_set = &nullb->__tag_set;
2112 rv = null_init_tag_set(nullb, nullb->tag_set);
2118 nullb->tag_set->timeout = 5 * HZ;
2119 nullb->disk = blk_mq_alloc_disk(nullb->tag_set, nullb);
2120 if (IS_ERR(nullb->disk)) {
2121 rv = PTR_ERR(nullb->disk);
2124 nullb->q = nullb->disk->queue;
2127 nullb->disk = blk_alloc_disk(nullb->dev->home_node);
2128 if (!nullb->disk)
2131 nullb->q = nullb->disk->queue;
2132 rv = init_driver_queues(nullb);
2139 nullb_setup_bwtimer(nullb);
2143 set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
2144 blk_queue_write_cache(nullb->q, true, true);
2148 rv = null_init_zoned_dev(dev, nullb->q);
2153 nullb->q->queuedata = nullb;
2154 blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
2162 nullb->index = rv;
2166 blk_queue_logical_block_size(nullb->q, dev->blocksize);
2167 blk_queue_physical_block_size(nullb->q, dev->blocksize);
2169 blk_queue_max_hw_sectors(nullb->q, dev->max_sectors);
2172 blk_queue_virt_boundary(nullb->q, PAGE_SIZE - 1);
2174 null_config_discard(nullb);
2178 snprintf(nullb->disk_name, sizeof(nullb->disk_name),
2181 sprintf(nullb->disk_name, "nullb%d", nullb->index);
2184 rv = null_gendisk_register(nullb);
2189 list_add_tail(&nullb->list, &nullb_list);
2192 pr_info("disk %s created\n", nullb->disk_name);
2197 ida_free(&nullb_indexes, nullb->index);
2201 put_disk(nullb->disk);
2203 if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
2204 blk_mq_free_tag_set(nullb->tag_set);
2206 cleanup_queues(nullb);
2208 kfree(nullb);
2209 dev->nullb = NULL;
2214 static struct nullb *null_find_dev_by_name(const char *name)
2216 struct nullb *nullb = NULL, *nb;
2221 nullb = nb;
2227 return nullb;
2248 static void null_destroy_dev(struct nullb *nullb)
2250 struct nullb_device *dev = nullb->dev;
2252 null_del_dev(nullb);
2261 struct nullb *nullb;
2309 null_major = register_blkdev(0, "nullb");
2326 nullb = list_entry(nullb_list.next, struct nullb, list);
2327 null_destroy_dev(nullb);
2329 unregister_blkdev(null_major, "nullb");
2340 struct nullb *nullb;
2344 unregister_blkdev(null_major, "nullb");
2348 nullb = list_entry(nullb_list.next, struct nullb, list);
2349 null_destroy_dev(nullb);