Lines matching refs: dev

11 static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
13 return sect >> ilog2(dev->zone_size_sects);
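
The shift at source line 13 is only valid because the driver rejects non-power-of-2 zone sizes at init time, which turns sector / zone_size into a shift by ilog2(zone_size_sects). A minimal userspace sketch of the same mapping (zone_no and zsects are illustrative names, not the driver's):

#include <stdint.h>
#include <stdio.h>

/* Sector-to-zone mapping: for a power-of-2 zone size, division
 * reduces to a right shift; __builtin_ctzll() matches ilog2() here. */
static unsigned int zone_no(uint64_t zone_size_sects, uint64_t sect)
{
    return (unsigned int)(sect >> __builtin_ctzll(zone_size_sects));
}

int main(void)
{
    uint64_t zsects = 256ULL << 11;  /* 256 MiB zone in 512 B sectors */

    /* a sector inside the fourth zone maps to zone number 3 */
    printf("zone %u\n", zone_no(zsects, 3 * zsects + 7));
    return 0;
}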
16 int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
22 if (!is_power_of_2(dev->zone_size)) {
26 if (dev->zone_size > dev->size) {
31 if (!dev->zone_capacity)
32 dev->zone_capacity = dev->zone_size;
34 if (dev->zone_capacity > dev->zone_size) {
36 dev->zone_capacity, dev->zone_size);
40 zone_capacity_sects = MB_TO_SECTS(dev->zone_capacity);
41 dev_capacity_sects = MB_TO_SECTS(dev->size);
42 dev->zone_size_sects = MB_TO_SECTS(dev->zone_size);
43 dev->nr_zones = dev_capacity_sects >> ilog2(dev->zone_size_sects);
44 if (dev_capacity_sects & (dev->zone_size_sects - 1))
45 dev->nr_zones++;
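
Lines 43-45 compute the zone count as a ceiling division: shift for the quotient, then add one if the capacity is not zone-aligned (mask test), so a trailing partial zone still gets a descriptor. A small standalone check of that arithmetic (names are illustrative):

#include <assert.h>
#include <stdint.h>

/* nr_zones = ceil(capacity / zone_size), done with shift + mask;
 * valid only because zone sizes are powers of two. */
static uint32_t nr_zones(uint64_t cap_sects, uint64_t zone_sects)
{
    uint32_t n = (uint32_t)(cap_sects >> __builtin_ctzll(zone_sects));

    if (cap_sects & (zone_sects - 1))   /* partial trailing zone */
        n++;
    return n;
}

int main(void)
{
    assert(nr_zones(1024, 256) == 4);   /* exact fit */
    assert(nr_zones(1025, 256) == 5);   /* one extra runt zone */
    return 0;
}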
47 dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct blk_zone),
49 if (!dev->zones)
59 spin_lock_init(&dev->zone_lock);
60 if (dev->memory_backed) {
61 dev->zone_locks = bitmap_zalloc(dev->nr_zones, GFP_KERNEL);
62 if (!dev->zone_locks) {
63 kvfree(dev->zones);
68 if (dev->zone_nr_conv >= dev->nr_zones) {
69 dev->zone_nr_conv = dev->nr_zones - 1;
71 dev->zone_nr_conv);
75 if (dev->zone_max_active >= dev->nr_zones - dev->zone_nr_conv) {
76 dev->zone_max_active = 0;
81 if (dev->zone_max_active && dev->zone_max_open > dev->zone_max_active) {
82 dev->zone_max_open = dev->zone_max_active;
84 dev->nr_zones);
85 } else if (dev->zone_max_open >= dev->nr_zones - dev->zone_nr_conv) {
86 dev->zone_max_open = 0;
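
Lines 68-86 sanitize the configured limits: at least one sequential zone must remain after the conventional ones, max_open may never exceed max_active, and a limit that could never bind is cleared to 0, meaning "unlimited". A hedged sketch of those clamp rules (the struct and function are illustrative, not driver code):

#include <stdint.h>
#include <stdio.h>

struct zlimits {
    uint32_t nr_zones, nr_conv, max_open, max_active;
};

static void sanitize(struct zlimits *l)
{
    if (l->nr_conv >= l->nr_zones)
        l->nr_conv = l->nr_zones - 1;   /* keep >= 1 sequential zone */

    if (l->max_active >= l->nr_zones - l->nr_conv)
        l->max_active = 0;              /* cannot bind: no limit */

    if (l->max_active && l->max_open > l->max_active)
        l->max_open = l->max_active;    /* open <= active */
    else if (l->max_open >= l->nr_zones - l->nr_conv)
        l->max_open = 0;                /* cannot bind: no limit */
}

int main(void)
{
    struct zlimits l = { .nr_zones = 8, .nr_conv = 2,
                         .max_open = 10, .max_active = 4 };

    sanitize(&l);
    /* prints: conv=2 open=4 active=4 */
    printf("conv=%u open=%u active=%u\n",
           l.nr_conv, l.max_open, l.max_active);
    return 0;
}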
90 for (i = 0; i < dev->zone_nr_conv; i++) {
91 struct blk_zone *zone = &dev->zones[i];
94 zone->len = dev->zone_size_sects;
100 sector += dev->zone_size_sects;
103 for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
104 struct blk_zone *zone = &dev->zones[i];
107 if (zone->start + dev->zone_size_sects > dev_capacity_sects)
110 zone->len = dev->zone_size_sects;
116 sector += dev->zone_size_sects;
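
The two init loops (lines 90-116) lay out conventional zones first, then sequential zones back to back; the check at line 107 caps the last zone at device capacity when the capacity is not zone-aligned. A self-contained model of that layout (struct zone here is a stand-in for struct blk_zone):

#include <stdint.h>
#include <stdio.h>

struct zone { uint64_t start, len; };

/* Zones are placed contiguously; the final zone is truncated at
 * device capacity when capacity is not a multiple of zone size. */
static void lay_out(struct zone *z, uint32_t nr,
                    uint64_t zone_sects, uint64_t cap_sects)
{
    uint64_t sector = 0;

    for (uint32_t i = 0; i < nr; i++) {
        z[i].start = sector;
        if (sector + zone_sects > cap_sects)
            z[i].len = cap_sects - sector;  /* runt last zone */
        else
            z[i].len = zone_sects;
        sector += zone_sects;
    }
}

int main(void)
{
    struct zone z[5];

    lay_out(z, 5, 256, 1100);   /* 4 full zones + one 76-sector runt */
    printf("last zone: start=%llu len=%llu\n",  /* 1024, 76 */
           (unsigned long long)z[4].start,
           (unsigned long long)z[4].len);
    return 0;
}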
128 struct nullb_device *dev = nullb->dev;
137 blk_queue_chunk_sectors(q, dev->zone_size_sects);
141 blk_queue_max_zone_append_sectors(q, dev->zone_size_sects);
142 blk_queue_max_open_zones(q, dev->zone_max_open);
143 blk_queue_max_active_zones(q, dev->zone_max_active);
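
blk_queue_chunk_sectors() at line 137 tells the block layer that no request may straddle a zone boundary; bios are split at multiples of zone_size_sects. As a rough model of what that limit enforces, the usable I/O length at a given sector is the distance to the next boundary (this assumes a power-of-2 chunk size, as the driver guarantees; max_io_sects is an illustrative helper, not a kernel API):

#include <stdint.h>
#include <stdio.h>

static uint64_t max_io_sects(uint64_t sector, uint64_t chunk_sects)
{
    /* distance from 'sector' to the next chunk/zone boundary */
    return chunk_sects - (sector & (chunk_sects - 1));
}

int main(void)
{
    /* an I/O starting at sector 250 may cover at most 6 sectors */
    printf("%llu\n", (unsigned long long)max_io_sects(250, 256));
    return 0;
}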
148 void null_free_zoned_dev(struct nullb_device *dev)
150 bitmap_free(dev->zone_locks);
151 kvfree(dev->zones);
152 dev->zones = NULL;
155 static inline void null_lock_zone(struct nullb_device *dev, unsigned int zno)
157 if (dev->memory_backed)
158 wait_on_bit_lock_io(dev->zone_locks, zno, TASK_UNINTERRUPTIBLE);
159 spin_lock_irq(&dev->zone_lock);
162 static inline void null_unlock_zone(struct nullb_device *dev, unsigned int zno)
164 spin_unlock_irq(&dev->zone_lock);
166 if (dev->memory_backed)
167 clear_and_wake_up_bit(zno, dev->zone_locks);
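
The locking is two-level: every zone operation takes the zone_lock spinlock, and memory-backed devices additionally take a per-zone bit lock that can be held across sleeping page-cache work (lines 157-167). A userspace approximation of the per-zone bit lock using C11 atomics; the kernel version sleeps in wait_on_bit_lock_io() where this sketch spins:

#include <stdatomic.h>
#include <stdbool.h>

#define NR_ZONES 16

/* one lock bit per zone, modeled after the dev->zone_locks bitmap */
static atomic_bool zone_locks[NR_ZONES];

static void lock_zone(unsigned int zno)
{
    while (atomic_exchange_explicit(&zone_locks[zno], true,
                                    memory_order_acquire))
        ;   /* spin; the driver sleeps here instead */
}

static void unlock_zone(unsigned int zno)
{
    atomic_store_explicit(&zone_locks[zno], false,
                          memory_order_release);
}

int main(void)
{
    lock_zone(3);
    /* ... touch zone 3's condition, write pointer, data ... */
    unlock_zone(3);
    return 0;
}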
174 struct nullb_device *dev = nullb->dev;
179 first_zone = null_zone_no(dev, sector);
180 if (first_zone >= dev->nr_zones)
183 nr_zones = min(nr_zones, dev->nr_zones - first_zone);
194 null_lock_zone(dev, zno);
195 memcpy(&zone, &dev->zones[zno], sizeof(struct blk_zone));
196 null_unlock_zone(dev, zno);
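
null_report_zones() copies each descriptor to a stack-local struct while the zone is locked (line 195), so the report callback runs on a stable snapshot even if writers advance the zone afterwards. A minimal illustration of the snapshot pattern (types and values are made up):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct zone { uint64_t start, wp, len; };

static struct zone zones[4] = { { 0, 128, 256 } };

int main(void)
{
    struct zone snap;

    /* lock_zone(0); */
    memcpy(&snap, &zones[0], sizeof(snap)); /* stable copy */
    /* unlock_zone(0); */

    /* report from the snapshot, not the live descriptor */
    printf("zone 0: wp=%llu\n", (unsigned long long)snap.wp);
    return 0;
}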
213 struct nullb_device *dev = nullb->dev;
214 struct blk_zone *zone = &dev->zones[null_zone_no(dev, sector)];
228 static blk_status_t null_close_zone(struct nullb_device *dev, struct blk_zone *zone)
238 dev->nr_zones_imp_open--;
241 dev->nr_zones_exp_open--;
253 dev->nr_zones_closed++;
259 static void null_close_first_imp_zone(struct nullb_device *dev)
263 for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
264 if (dev->zones[i].cond == BLK_ZONE_COND_IMP_OPEN) {
265 null_close_zone(dev, &dev->zones[i]);
271 static blk_status_t null_check_active(struct nullb_device *dev)
273 if (!dev->zone_max_active)
276 if (dev->nr_zones_exp_open + dev->nr_zones_imp_open +
277 dev->nr_zones_closed < dev->zone_max_active)
283 static blk_status_t null_check_open(struct nullb_device *dev)
285 if (!dev->zone_max_open)
288 if (dev->nr_zones_exp_open + dev->nr_zones_imp_open < dev->zone_max_open)
291 if (dev->nr_zones_imp_open) {
292 if (null_check_active(dev) == BLK_STS_OK) {
293 null_close_first_imp_zone(dev);
314 static blk_status_t null_check_zone_resources(struct nullb_device *dev, struct blk_zone *zone)
320 ret = null_check_active(dev);
325 return null_check_open(dev);
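
Lines 271-325 implement the open/active budgeting: a zone may move to an open or active state only while the counters stay below the configured limits, and when the open limit is hit the driver makes room by closing the first implicitly-open zone (lines 259-267 and 291-293). A condensed sketch of that policy (counters and names are illustrative; error paths omitted):

#include <stdbool.h>
#include <stdio.h>

struct zcount {
    unsigned int imp_open, exp_open, closed;
    unsigned int max_open, max_active;  /* 0 means "no limit" */
};

static bool active_ok(const struct zcount *c)
{
    return !c->max_active ||
           c->imp_open + c->exp_open + c->closed < c->max_active;
}

static bool open_ok(struct zcount *c)
{
    if (!c->max_open || c->imp_open + c->exp_open < c->max_open)
        return true;
    if (c->imp_open && active_ok(c)) {
        c->imp_open--;  /* close one implicitly-open zone */
        c->closed++;    /* it stays active, just not open */
        return true;
    }
    return false;       /* would exceed the open limit */
}

int main(void)
{
    struct zcount c = { .imp_open = 2, .max_open = 2, .max_active = 4 };

    /* prints: open_ok=1 imp_open=1 closed=1 */
    printf("open_ok=%d imp_open=%u closed=%u\n",
           open_ok(&c), c.imp_open, c.closed);
    return 0;
}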
336 struct nullb_device *dev = cmd->nq->dev;
337 unsigned int zno = null_zone_no(dev, sector);
338 struct blk_zone *zone = &dev->zones[zno];
349 null_lock_zone(dev, zno);
358 ret = null_check_zone_resources(dev, zone);
394 dev->nr_zones_closed--;
395 dev->nr_zones_imp_open++;
397 dev->nr_zones_imp_open++;
407 if (dev->memory_backed)
408 spin_unlock_irq(&dev->zone_lock);
410 if (dev->memory_backed)
411 spin_lock_irq(&dev->zone_lock);
419 dev->nr_zones_exp_open--;
421 dev->nr_zones_imp_open--;
427 null_unlock_zone(dev, zno);
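
In the write path, lines 407-411 drop the zone_lock spinlock around the actual data copy for memory-backed devices, since that copy may sleep; the per-zone bit lock taken in null_lock_zone() keeps the zone consistent in the meantime. A sketch of the drop-and-retake pattern, with a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t zone_meta_lock = PTHREAD_MUTEX_INITIALIZER;
static int memory_backed = 1;

static void do_sleeping_io(void)
{
    /* stands in for the data copy, which may block on allocation */
}

static void zone_write(void)
{
    pthread_mutex_lock(&zone_meta_lock);
    /* ... validate write pointer, maybe implicitly open the zone ... */

    if (memory_backed)
        pthread_mutex_unlock(&zone_meta_lock);  /* drop for I/O */
    do_sleeping_io();
    if (memory_backed)
        pthread_mutex_lock(&zone_meta_lock);    /* retake */

    /* ... advance the write pointer, maybe mark the zone full ... */
    pthread_mutex_unlock(&zone_meta_lock);
}

int main(void)
{
    zone_write();
    puts("write done");
    return 0;
}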
432 static blk_status_t null_open_zone(struct nullb_device *dev, struct blk_zone *zone)
444 ret = null_check_zone_resources(dev, zone);
449 dev->nr_zones_imp_open--;
452 ret = null_check_zone_resources(dev, zone);
455 dev->nr_zones_closed--;
463 dev->nr_zones_exp_open++;
468 static blk_status_t null_finish_zone(struct nullb_device *dev, struct blk_zone *zone)
480 ret = null_check_zone_resources(dev, zone);
485 dev->nr_zones_imp_open--;
488 dev->nr_zones_exp_open--;
491 ret = null_check_zone_resources(dev, zone);
494 dev->nr_zones_closed--;
506 static blk_status_t null_reset_zone(struct nullb_device *dev, struct blk_zone *zone)
516 dev->nr_zones_imp_open--;
519 dev->nr_zones_exp_open--;
522 dev->nr_zones_closed--;
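
null_open_zone(), null_close_zone(), null_finish_zone() and null_reset_zone() all share one shape: leave the current condition (decrementing its counter), then enter the new one. A condensed state-machine model of those transitions; the real functions also run the resource checks and reject illegal transitions, which this sketch omits:

#include <stdio.h>

enum cond { EMPTY, IMP_OPEN, EXP_OPEN, CLOSED, FULL };

struct counts { unsigned int imp_open, exp_open, closed; };

/* drop the accounting for the condition being left */
static void leave(struct counts *d, enum cond c)
{
    if (c == IMP_OPEN)
        d->imp_open--;
    else if (c == EXP_OPEN)
        d->exp_open--;
    else if (c == CLOSED)
        d->closed--;
}

static enum cond open_zone(struct counts *d, enum cond c)
{
    leave(d, c);
    d->exp_open++;
    return EXP_OPEN;
}

static enum cond finish_zone(struct counts *d, enum cond c)
{
    leave(d, c);
    return FULL;    /* write pointer forced to the zone end */
}

static enum cond reset_zone(struct counts *d, enum cond c)
{
    leave(d, c);
    return EMPTY;   /* write pointer back to the zone start */
}

int main(void)
{
    struct counts d = { .imp_open = 1 };
    enum cond c = IMP_OPEN;

    c = open_zone(&d, c);   /* IMP_OPEN -> EXP_OPEN */
    c = finish_zone(&d, c); /* EXP_OPEN -> FULL */
    c = reset_zone(&d, c);  /* FULL -> EMPTY */
    printf("imp=%u exp=%u closed=%u cond=%d\n", /* 0 0 0 0 */
           d.imp_open, d.exp_open, d.closed, c);
    return 0;
}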
539 struct nullb_device *dev = cmd->nq->dev;
546 for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
547 null_lock_zone(dev, i);
548 zone = &dev->zones[i];
550 null_reset_zone(dev, zone);
553 null_unlock_zone(dev, i);
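
REQ_OP_ZONE_RESET_ALL (lines 546-553) sweeps every zone past the conventional range, taking and releasing each zone's lock in turn; conventional zones are skipped because they have no write pointer to reset. A toy version of that sweep:

#include <stdio.h>

#define NR_ZONES    8
#define NR_CONV     2   /* zones 0..1 are conventional */

enum cond { EMPTY, OPEN, FULL };

int main(void)
{
    enum cond zones[NR_ZONES] = { FULL, FULL, OPEN, FULL,
                                  EMPTY, OPEN, FULL, EMPTY };

    /* start past the conventional zones: nothing to reset there */
    for (int i = NR_CONV; i < NR_ZONES; i++) {
        /* lock_zone(i); */
        zones[i] = EMPTY;   /* reset the write pointer */
        /* unlock_zone(i); */
    }

    for (int i = 0; i < NR_ZONES; i++)
        printf("%d", zones[i]); /* prints 22000000 */
    printf("\n");
    return 0;
}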
558 zone_no = null_zone_no(dev, sector);
559 zone = &dev->zones[zone_no];
561 null_lock_zone(dev, zone_no);
565 ret = null_reset_zone(dev, zone);
568 ret = null_open_zone(dev, zone);
571 ret = null_close_zone(dev, zone);
574 ret = null_finish_zone(dev, zone);
584 null_unlock_zone(dev, zone_no);
592 struct nullb_device *dev = cmd->nq->dev;
593 unsigned int zno = null_zone_no(dev, sector);
611 null_lock_zone(dev, zno);
613 null_unlock_zone(dev, zno);
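
At the top level, null_process_zoned_cmd() is a three-way dispatch: writes go through the write-pointer logic, zone-management ops through the switch at lines 561-584, and everything else (reads, flush) simply runs under the zone lock for a consistent view, as lines 611-613 show. A closing sketch of that dispatch with stand-in opcode names:

#include <stdio.h>

enum req_op { OP_READ, OP_WRITE, OP_ZONE_RESET, OP_ZONE_OPEN,
              OP_ZONE_CLOSE, OP_ZONE_FINISH };

static const char *process_zoned_cmd(enum req_op op)
{
    switch (op) {
    case OP_WRITE:
        return "zone_write";    /* wp checks + data copy */
    case OP_ZONE_RESET:
    case OP_ZONE_OPEN:
    case OP_ZONE_CLOSE:
    case OP_ZONE_FINISH:
        return "zone_mgmt";     /* state transitions above */
    default:
        /* lock_zone(zno); process; unlock_zone(zno); */
        return "plain I/O under the zone lock";
    }
}

int main(void)
{
    printf("%s\n", process_zoned_cmd(OP_READ));
    return 0;
}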