Lines Matching refs:nullb

51  * nullb_page is a page in memory for nullb devices.
225 static void null_del_dev(struct nullb *nullb);
326 struct nullb *nullb = dev->nullb;
329 if (!nullb)
333 * Make sure that null_init_hctx() does not access nullb->queues[] past
338 set = nullb->tag_set;
395 null_del_dev(dev->nullb);
526 null_del_dev(dev->nullb);
560 .ci_namebuf = "nullb",
566 static inline int null_cache_active(struct nullb *nullb)
568 return test_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
758 static void null_free_sector(struct nullb *nullb, sector_t sector,
766 root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
779 nullb->dev->curr_cache -= PAGE_SIZE;
784 static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
789 root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
796 nullb->dev->curr_cache += PAGE_SIZE;
830 static struct nullb_page *__null_lookup_page(struct nullb *nullb,
841 root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
851 static struct nullb_page *null_lookup_page(struct nullb *nullb,
857 page = __null_lookup_page(nullb, sector, for_write, true);
860 return __null_lookup_page(nullb, sector, for_write, false);
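The null_lookup_page() fragments above (lines 851-860) show a two-tier lookup: when the write-back cache is in play, a sector is resolved against the cache tree first and only then against the backing data tree. Below is a minimal userspace sketch of that ordering; the toy arrays, names, and sizes are illustrative stand-ins for the driver's two radix trees.

/*
 * Sketch of the cache-then-data lookup order in null_lookup_page().
 * Two boolean arrays stand in for the dev->cache and dev->data trees.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_PAGES 16

static bool cache_present[NR_PAGES];    /* dev->cache analogue */
static bool data_present[NR_PAGES];     /* dev->data analogue */

/* Analogue of __null_lookup_page(): pick one of the two stores. */
static bool lookup(unsigned long idx, bool is_cache)
{
    if (idx >= NR_PAGES)
        return false;
    return is_cache ? cache_present[idx] : data_present[idx];
}

/* Analogue of null_lookup_page(): try the cache first, then the data tree. */
static const char *lookup_page(unsigned long idx, bool ignore_cache)
{
    if (!ignore_cache && lookup(idx, true))
        return "cache";
    return lookup(idx, false) ? "data" : "missing";
}

int main(void)
{
    cache_present[3] = true;    /* dirty page still waiting for a flush */
    data_present[3] = true;     /* older copy already written back */
    data_present[5] = true;

    printf("page 3: %s\n", lookup_page(3, false));  /* cache copy wins */
    printf("page 5: %s\n", lookup_page(5, false));  /* falls back to data */
    printf("page 7: %s\n", lookup_page(7, false));  /* never allocated */
    return 0;
}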
863 static struct nullb_page *null_insert_page(struct nullb *nullb,
865 __releases(&nullb->lock)
866 __acquires(&nullb->lock)
871 t_page = null_lookup_page(nullb, sector, true, ignore_cache);
875 spin_unlock_irq(&nullb->lock);
884 spin_lock_irq(&nullb->lock);
887 t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache);
894 spin_lock_irq(&nullb->lock);
895 return null_lookup_page(nullb, sector, true, ignore_cache);
898 static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
908 t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);
914 ret = radix_tree_delete_item(&nullb->dev->data,
928 i += (nullb->dev->blocksize >> SECTOR_SHIFT)) {
932 nullb->dev->blocksize);
940 ret = radix_tree_delete_item(&nullb->dev->cache, idx, c_page);
942 nullb->dev->curr_cache -= PAGE_SIZE;
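The idx << PAGE_SECTORS_SHIFT and blocksize >> SECTOR_SHIFT expressions above rest on the usual 512-byte-sector arithmetic: one backing page covers PAGE_SIZE >> SECTOR_SHIFT sectors, so a sector maps to a tree index by a right shift and to a byte offset within the page by a mask. A self-contained sketch of that arithmetic, assuming 4 KiB pages (the constants are restated locally so the example compiles anywhere):

/*
 * Sector <-> backing-page arithmetic, assuming 512-byte sectors and
 * 4 KiB pages. PAGE_SHIFT is an assumption for the example.
 */
#include <stdio.h>

#define SECTOR_SHIFT        9                           /* 512-byte sectors */
#define PAGE_SHIFT          12                          /* 4 KiB pages (assumed) */
#define PAGE_SECTORS_SHIFT  (PAGE_SHIFT - SECTOR_SHIFT) /* sectors per page, as a shift */
#define PAGE_SECTORS        (1u << PAGE_SECTORS_SHIFT)
#define SECTOR_MASK         (PAGE_SECTORS - 1)

int main(void)
{
    unsigned long long sector = 12345;

    unsigned long long idx = sector >> PAGE_SECTORS_SHIFT;      /* radix tree index */
    unsigned int off = (sector & SECTOR_MASK) << SECTOR_SHIFT;  /* byte offset in the page */

    printf("sector %llu -> page index %llu, byte offset %u\n", sector, idx, off);
    printf("first sector covered by that page: %llu\n", idx << PAGE_SECTORS_SHIFT);
    return 0;
}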
947 static int null_make_cache_space(struct nullb *nullb, unsigned long n)
954 if ((nullb->dev->cache_size * 1024 * 1024) >
955 nullb->dev->curr_cache + n || nullb->dev->curr_cache == 0)
958 nr_pages = radix_tree_gang_lookup(&nullb->dev->cache,
959 (void **)c_pages, nullb->cache_flush_pos, FREE_BATCH);
965 nullb->cache_flush_pos = c_pages[i]->page->index;
980 err = null_flush_cache_page(nullb, c_pages[i]);
989 nullb->cache_flush_pos = 0;
992 spin_unlock_irq(&nullb->lock);
993 spin_lock_irq(&nullb->lock);
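null_make_cache_space() (lines 947-993 above) evicts only when the configured cache_size, converted from megabytes to bytes, can no longer absorb curr_cache plus the incoming bytes. It then walks the cache tree in FREE_BATCH-sized gang lookups from a persistent cursor, writes pages back, and resets the cursor to zero when a pass finds nothing. The simplified sketch below shows just the cursor walk: it flushes everything rather than stopping once enough space is free, and a plain array stands in for the radix tree.

/*
 * Simplified sketch of the batched, cursor-driven eviction walk.
 * The "cache" is a fixed array of page indices; -1 marks a freed slot.
 */
#include <stdio.h>

#define FREE_BATCH  16
#define CACHE_SLOTS 64

static long cache[CACHE_SLOTS];         /* cached page indices, ascending */
static unsigned long cursor;            /* cache_flush_pos analogue */

/* Return up to max cached indices >= start, like a gang lookup. */
static int gang_lookup(unsigned long start, long *out, int max)
{
    int n = 0;

    for (int i = 0; i < CACHE_SLOTS && n < max; i++)
        if (cache[i] >= 0 && (unsigned long)cache[i] >= start)
            out[n++] = cache[i];
    return n;
}

/* "Write back" one cached page and drop it from the cache. */
static void flush_one(long idx)
{
    for (int i = 0; i < CACHE_SLOTS; i++)
        if (cache[i] == idx)
            cache[i] = -1;
}

static void make_space(void)
{
    long batch[FREE_BATCH];
    int n;

    while ((n = gang_lookup(cursor, batch, FREE_BATCH)) > 0) {
        cursor = batch[n - 1] + 1;      /* resume after this batch */
        for (int i = 0; i < n; i++)
            flush_one(batch[i]);
    }
    cursor = 0;                         /* nothing left: wrap for next time */
}

int main(void)
{
    for (int i = 0; i < CACHE_SLOTS; i++)
        cache[i] = i * 3;               /* pretend 64 pages are cached */

    make_space();
    printf("cursor after a full flush: %lu\n", cursor);
    return 0;
}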
1000 static int copy_to_nullb(struct nullb *nullb, struct page *source,
1009 temp = min_t(size_t, nullb->dev->blocksize, n - count);
1011 if (null_cache_active(nullb) && !is_fua)
1012 null_make_cache_space(nullb, PAGE_SIZE);
1015 t_page = null_insert_page(nullb, sector,
1016 !null_cache_active(nullb) || is_fua);
1029 null_free_sector(nullb, sector, true);
1037 static int copy_from_nullb(struct nullb *nullb, struct page *dest,
1046 temp = min_t(size_t, nullb->dev->blocksize, n - count);
1049 t_page = null_lookup_page(nullb, sector, false,
1050 !null_cache_active(nullb));
1069 static void nullb_fill_pattern(struct nullb *nullb, struct page *page,
1079 static void null_handle_discard(struct nullb *nullb, sector_t sector, size_t n)
1083 spin_lock_irq(&nullb->lock);
1085 temp = min_t(size_t, n, nullb->dev->blocksize);
1086 null_free_sector(nullb, sector, false);
1087 if (null_cache_active(nullb))
1088 null_free_sector(nullb, sector, true);
1092 spin_unlock_irq(&nullb->lock);
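null_handle_discard() (lines 1079-1092) walks the discarded byte range one block at a time under nullb->lock, freeing each block from the data tree and, when the cache is active, from the cache tree as well. A small sketch of that blocksize-granular walk, with the per-block free reduced to a counter:

/*
 * Sketch of the discard walk: chop the byte range into blocksize-sized
 * chunks, "free" each chunk, and advance the sector cursor accordingly.
 */
#include <stdio.h>

#define SECTOR_SHIFT 9

static unsigned long freed_blocks;

static void free_sector(unsigned long long sector)
{
    (void)sector;                       /* null_free_sector() analogue */
    freed_blocks++;
}

static void handle_discard(unsigned long long sector, size_t n,
                           unsigned int blocksize)
{
    while (n > 0) {
        size_t temp = n < blocksize ? n : blocksize;

        free_sector(sector);
        sector += temp >> SECTOR_SHIFT;
        n -= temp;
    }
}

int main(void)
{
    /* Discard 1 MiB starting at sector 2048 on a 4096-byte-block device. */
    handle_discard(2048, 1024 * 1024, 4096);
    printf("freed %lu blocks\n", freed_blocks);     /* expect 256 */
    return 0;
}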
1095 static int null_handle_flush(struct nullb *nullb)
1099 if (!null_cache_active(nullb))
1102 spin_lock_irq(&nullb->lock);
1104 err = null_make_cache_space(nullb,
1105 nullb->dev->cache_size * 1024 * 1024);
1106 if (err || nullb->dev->curr_cache == 0)
1110 WARN_ON(!radix_tree_empty(&nullb->dev->cache));
1111 spin_unlock_irq(&nullb->lock);
1115 static int null_transfer(struct nullb *nullb, struct page *page,
1119 struct nullb_device *dev = nullb->dev;
1125 valid_len = null_zone_valid_read_len(nullb,
1129 err = copy_from_nullb(nullb, page, off,
1136 nullb_fill_pattern(nullb, page, len, off);
1140 err = copy_to_nullb(nullb, page, off, sector, len, is_fua);
1149 struct nullb *nullb = cmd->nq->dev->nullb;
1159 null_handle_discard(nullb, sector, blk_rq_bytes(rq));
1163 spin_lock_irq(&nullb->lock);
1166 err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
1170 spin_unlock_irq(&nullb->lock);
1175 spin_unlock_irq(&nullb->lock);
1183 struct nullb *nullb = cmd->nq->dev->nullb;
1193 null_handle_discard(nullb, sector,
1198 spin_lock_irq(&nullb->lock);
1201 err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
1205 spin_unlock_irq(&nullb->lock);
1210 spin_unlock_irq(&nullb->lock);
1214 static void null_stop_queue(struct nullb *nullb)
1216 struct request_queue *q = nullb->q;
1218 if (nullb->dev->queue_mode == NULL_Q_MQ)
1222 static void null_restart_queue_async(struct nullb *nullb)
1224 struct request_queue *q = nullb->q;
1226 if (nullb->dev->queue_mode == NULL_Q_MQ)
1233 struct nullb *nullb = dev->nullb;
1237 if (!hrtimer_active(&nullb->bw_timer))
1238 hrtimer_restart(&nullb->bw_timer);
1240 if (atomic_long_sub_return(blk_rq_bytes(rq), &nullb->cur_bytes) < 0) {
1241 null_stop_queue(nullb);
1243 if (atomic_long_read(&nullb->cur_bytes) > 0)
1244 null_restart_queue_async(nullb);
1354 struct nullb *nullb = dev->nullb;
1364 cmd->error = errno_to_blk_status(null_handle_flush(nullb));
1384 struct nullb *nullb = container_of(timer, struct nullb, bw_timer);
1386 unsigned int mbps = nullb->dev->mbps;
1388 if (atomic_long_read(&nullb->cur_bytes) == mb_per_tick(mbps))
1391 atomic_long_set(&nullb->cur_bytes, mb_per_tick(mbps));
1392 null_restart_queue_async(nullb);
1394 hrtimer_forward_now(&nullb->bw_timer, timer_interval);
1399 static void nullb_setup_bwtimer(struct nullb *nullb)
1403 hrtimer_init(&nullb->bw_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1404 nullb->bw_timer.function = nullb_bwtimer_fn;
1405 atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps));
1406 hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL);
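The throttling fragments above (the atomic_long_sub_return() charge around line 1240 and the nullb_bwtimer_fn()/nullb_setup_bwtimer() refill at lines 1384-1406) amount to a per-tick byte budget: each request is charged against cur_bytes, the queue stops once the budget goes negative, and the hrtimer tops the budget back up to mb_per_tick(mbps) and restarts the queue. A userspace sketch of that pattern, with an assumed tick length since the driver's timer interval is not shown in this listing:

/*
 * Sketch of the per-tick byte budget used for bandwidth throttling:
 * submissions are charged against a budget that a periodic timer refills
 * to mb_per_tick(mbps). TICKS_PER_SEC is an assumption for the example,
 * not the driver's timer interval.
 */
#include <stdbool.h>
#include <stdio.h>

#define TICKS_PER_SEC 20                /* assumed 50 ms timer interval */

static long mb_per_tick(unsigned int mbps)
{
    return (long)mbps * 1024 * 1024 / TICKS_PER_SEC;
}

struct throttle {
    long cur_bytes;                     /* remaining budget for this tick */
    bool stopped;                       /* queue stopped until the next refill */
};

/* Submission path: charge the request, stop the queue if we overdraw. */
static bool charge(struct throttle *t, long rq_bytes)
{
    t->cur_bytes -= rq_bytes;
    if (t->cur_bytes < 0) {
        t->stopped = true;              /* null_stop_queue() analogue */
        return false;                   /* caller would requeue the request */
    }
    return true;
}

/* Timer callback: refill the budget and kick the queue if it was stopped. */
static void timer_tick(struct throttle *t, unsigned int mbps)
{
    t->cur_bytes = mb_per_tick(mbps);
    if (t->stopped)
        t->stopped = false;             /* null_restart_queue_async() analogue */
}

int main(void)
{
    struct throttle t = { .cur_bytes = mb_per_tick(100) };
    const long rq_bytes = 512 * 1024;   /* 512 KiB requests */
    int dispatched = 0;

    while (charge(&t, rq_bytes))
        dispatched++;
    printf("dispatched %d requests in one tick at 100 MB/s\n", dispatched);

    timer_tick(&t, 100);
    printf("budget after refill: %ld bytes\n", t.cur_bytes);
    return 0;
}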
1409 static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
1413 if (nullb->nr_queues != 1)
1414 index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);
1416 return &nullb->queues[index];
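nullb_to_queue() (lines 1409-1416) spreads CPUs over the available submit queues with a ceiling division: each queue serves a contiguous block of ceil(nr_cpu_ids / nr_queues) CPUs, and a single queue always gets index 0. A small sketch of the same arithmetic (the function name and the example CPU/queue counts are only for illustration):

/*
 * Sketch of the CPU-to-queue spreading at line 1414: with nr_queues
 * submit queues and nr_cpu_ids possible CPUs, each queue serves a
 * contiguous block of ceil(nr_cpu_ids / nr_queues) CPUs.
 */
#include <stdio.h>

static unsigned int cpu_to_queue(unsigned int cpu, unsigned int nr_cpu_ids,
                                 unsigned int nr_queues)
{
    if (nr_queues == 1)
        return 0;
    return cpu / ((nr_cpu_ids + nr_queues - 1) / nr_queues);
}

int main(void)
{
    const unsigned int nr_cpu_ids = 8, nr_queues = 3;

    for (unsigned int cpu = 0; cpu < nr_cpu_ids; cpu++)
        printf("cpu %u -> queue %u\n", cpu,
               cpu_to_queue(cpu, nr_cpu_ids, nr_queues));
    return 0;
}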
1423 struct nullb *nullb = bio->bi_disk->private_data;
1424 struct nullb_queue *nq = nullb_to_queue(nullb);
1518 static void cleanup_queues(struct nullb *nullb)
1522 for (i = 0; i < nullb->nr_queues; i++)
1523 cleanup_queue(&nullb->queues[i]);
1525 kfree(nullb->queues);
1531 struct nullb *nullb = nq->dev->nullb;
1533 nullb->nr_queues--;
1536 static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
1539 nq->queue_depth = nullb->queue_depth;
1540 nq->dev = nullb->dev;
1546 struct nullb *nullb = hctx->queue->queuedata;
1554 nq = &nullb->queues[hctx_idx];
1556 null_init_queue(nullb, nq);
1557 nullb->nr_queues++;
1570 static void null_del_dev(struct nullb *nullb)
1574 if (!nullb)
1577 dev = nullb->dev;
1579 ida_simple_remove(&nullb_indexes, nullb->index);
1581 list_del_init(&nullb->list);
1583 del_gendisk(nullb->disk);
1585 if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) {
1586 hrtimer_cancel(&nullb->bw_timer);
1587 atomic_long_set(&nullb->cur_bytes, LONG_MAX);
1588 null_restart_queue_async(nullb);
1591 blk_cleanup_queue(nullb->q);
1593 nullb->tag_set == &nullb->__tag_set)
1594 blk_mq_free_tag_set(nullb->tag_set);
1595 put_disk(nullb->disk);
1596 cleanup_queues(nullb);
1597 if (null_cache_active(nullb))
1598 null_free_device_storage(nullb->dev, true);
1599 kfree(nullb);
1600 dev->nullb = NULL;
1603 static void null_config_discard(struct nullb *nullb)
1605 if (nullb->dev->discard == false)
1608 if (nullb->dev->zoned) {
1609 nullb->dev->discard = false;
1614 nullb->q->limits.discard_granularity = nullb->dev->blocksize;
1615 nullb->q->limits.discard_alignment = nullb->dev->blocksize;
1616 blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
1617 blk_queue_flag_set(QUEUE_FLAG_DISCARD, nullb->q);
1655 static int setup_queues(struct nullb *nullb)
1657 nullb->queues = kcalloc(nr_cpu_ids, sizeof(struct nullb_queue),
1659 if (!nullb->queues)
1662 nullb->queue_depth = nullb->dev->hw_queue_depth;
1667 static int init_driver_queues(struct nullb *nullb)
1672 for (i = 0; i < nullb->dev->submit_queues; i++) {
1673 nq = &nullb->queues[i];
1675 null_init_queue(nullb, nq);
1680 nullb->nr_queues++;
1685 static int null_gendisk_register(struct nullb *nullb)
1687 sector_t size = ((sector_t)nullb->dev->size * SZ_1M) >> SECTOR_SHIFT;
1690 disk = nullb->disk = alloc_disk_node(1, nullb->dev->home_node);
1697 disk->first_minor = nullb->index;
1698 if (queue_is_mq(nullb->q))
1702 disk->private_data = nullb;
1703 disk->queue = nullb->q;
1704 strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
1706 if (nullb->dev->zoned) {
1707 int ret = null_register_zoned_dev(nullb);
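null_gendisk_register() computes the disk capacity at line 1687 by treating dev->size as megabytes: multiply by SZ_1M, then shift right by SECTOR_SHIFT to get 512-byte sectors. The snippet below just spells that conversion out; the 250 MB figure is only an example.

/*
 * Capacity arithmetic: size in MB -> bytes -> 512-byte sectors.
 * SZ_1M and SECTOR_SHIFT are restated so the example is self-contained.
 */
#include <stdio.h>

#define SECTOR_SHIFT 9
#define SZ_1M        (1024ULL * 1024ULL)

int main(void)
{
    unsigned long long size_mb = 250;   /* example device size */
    unsigned long long sectors = (size_mb * SZ_1M) >> SECTOR_SHIFT;

    printf("%llu MB -> %llu sectors (%llu bytes)\n",
           size_mb, sectors, sectors << SECTOR_SHIFT);
    return 0;
}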
1717 static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
1720 set->nr_hw_queues = nullb ? nullb->dev->submit_queues :
1722 set->queue_depth = nullb ? nullb->dev->hw_queue_depth :
1724 set->numa_node = nullb ? nullb->dev->home_node : g_home_node;
1733 if ((nullb && nullb->dev->blocking) || g_blocking)
1810 struct nullb *nullb;
1817 nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node);
1818 if (!nullb) {
1822 nullb->dev = dev;
1823 dev->nullb = nullb;
1825 spin_lock_init(&nullb->lock);
1827 rv = setup_queues(nullb);
1833 nullb->tag_set = &tag_set;
1836 nullb->tag_set = &nullb->__tag_set;
1837 rv = null_init_tag_set(nullb, nullb->tag_set);
1846 nullb->tag_set->timeout = 5 * HZ;
1847 nullb->q = blk_mq_init_queue_data(nullb->tag_set, nullb);
1848 if (IS_ERR(nullb->q)) {
1853 nullb->q = blk_alloc_queue(dev->home_node);
1854 if (!nullb->q) {
1858 rv = init_driver_queues(nullb);
1865 nullb_setup_bwtimer(nullb);
1869 set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
1870 blk_queue_write_cache(nullb->q, true, true);
1874 rv = null_init_zoned_dev(dev, nullb->q);
1879 nullb->q->queuedata = nullb;
1880 blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
1881 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q);
1889 nullb->index = rv;
1893 blk_queue_logical_block_size(nullb->q, dev->blocksize);
1894 blk_queue_physical_block_size(nullb->q, dev->blocksize);
1896 null_config_discard(nullb);
1898 sprintf(nullb->disk_name, "nullb%d", nullb->index);
1900 rv = null_gendisk_register(nullb);
1905 list_add_tail(&nullb->list, &nullb_list);
1911 ida_free(&nullb_indexes, nullb->index);
1915 blk_cleanup_queue(nullb->q);
1917 if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
1918 blk_mq_free_tag_set(nullb->tag_set);
1920 cleanup_queues(nullb);
1922 kfree(nullb);
1923 dev->nullb = NULL;
1932 struct nullb *nullb;
1976 null_major = register_blkdev(0, "nullb");
2000 nullb = list_entry(nullb_list.next, struct nullb, list);
2001 dev = nullb->dev;
2002 null_del_dev(nullb);
2005 unregister_blkdev(null_major, "nullb");
2016 struct nullb *nullb;
2020 unregister_blkdev(null_major, "nullb");
2026 nullb = list_entry(nullb_list.next, struct nullb, list);
2027 dev = nullb->dev;
2028 null_del_dev(nullb);