Lines matching refs:dev — references to the struct nullb_device pointer `dev` in the Linux kernel's null_blk zoned-device emulation (drivers/block/null_blk/zoned.c).

17 static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
19 return sect >> ilog2(dev->zone_size_sects);
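
Because the zone size must be a power of two (enforced at line 68 below), mapping a sector to its zone number is a single right shift. A minimal standalone sketch of the arithmetic, with assumed values (256 MB zones, i.e. 524288 512-byte sectors):

    #include <stdio.h>

    /* __builtin_ctzll() of a power of two equals the kernel's ilog2() */
    static unsigned int zone_no(unsigned long long sect,
                                unsigned long long zone_size_sects)
    {
            return sect >> __builtin_ctzll(zone_size_sects);
    }

    int main(void)
    {
            /* 256 MB zone = 524288 sectors: sector 1048576 is in zone 2 */
            printf("%u\n", zone_no(1048576ULL, 524288ULL));
            return 0;
    }
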
22 static inline void null_lock_zone_res(struct nullb_device *dev)
24 if (dev->need_zone_res_mgmt)
25 spin_lock_irq(&dev->zone_res_lock);
28 static inline void null_unlock_zone_res(struct nullb_device *dev)
30 if (dev->need_zone_res_mgmt)
31 spin_unlock_irq(&dev->zone_res_lock);
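
Note the lock elision: need_zone_res_mgmt is set only when an open or active zone limit is configured (line 120 below). Without such limits the open/active counters are never consulted, so the irq-disabling spinlock is skipped entirely on the IO path.
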
34 static inline void null_init_zone_lock(struct nullb_device *dev,
37 if (!dev->memory_backed)
43 static inline void null_lock_zone(struct nullb_device *dev,
46 if (!dev->memory_backed)
52 static inline void null_unlock_zone(struct nullb_device *dev,
55 if (!dev->memory_backed)
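
The matches above show only the memory_backed test; the elided branches select the per-zone lock type. A hedged reconstruction (the spinlock/mutex members of struct nullb_zone are assumed from context): without memory backing, commands complete inline and cannot sleep, so a spinlock suffices, whereas memory-backed IO may sleep while copying pages and therefore takes a mutex.

    static inline void null_lock_zone(struct nullb_device *dev,
                                      struct nullb_zone *zone)
    {
            if (!dev->memory_backed)
                    spin_lock_irq(&zone->spinlock); /* IO cannot sleep here */
            else
                    mutex_lock(&zone->mutex);       /* copying pages may sleep */
    }
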
61 int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
68 if (!is_power_of_2(dev->zone_size)) {
72 if (dev->zone_size > dev->size) {
77 if (!dev->zone_capacity)
78 dev->zone_capacity = dev->zone_size;
80 if (dev->zone_capacity > dev->zone_size) {
82 dev->zone_capacity, dev->zone_size);
86 zone_capacity_sects = mb_to_sects(dev->zone_capacity);
87 dev_capacity_sects = mb_to_sects(dev->size);
88 dev->zone_size_sects = mb_to_sects(dev->zone_size);
89 dev->nr_zones = round_up(dev_capacity_sects, dev->zone_size_sects)
90 >> ilog2(dev->zone_size_sects);
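
Worked example: a 1000 MB device with 256 MB zones gives dev_capacity_sects = 2048000 and zone_size_sects = 524288, so round_up(2048000, 524288) >> 19 = 4 zones. The rounding up means the last zone may be a runt shorter than zone_size_sects, handled at line 142 below.
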
92 dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct nullb_zone),
94 if (!dev->zones)
97 spin_lock_init(&dev->zone_res_lock);
99 if (dev->zone_nr_conv >= dev->nr_zones) {
100 dev->zone_nr_conv = dev->nr_zones - 1;
102 dev->zone_nr_conv);
106 if (dev->zone_max_active >= dev->nr_zones - dev->zone_nr_conv) {
107 dev->zone_max_active = 0;
112 if (dev->zone_max_active && dev->zone_max_open > dev->zone_max_active) {
113 dev->zone_max_open = dev->zone_max_active;
115 dev->nr_zones);
116 } else if (dev->zone_max_open >= dev->nr_zones - dev->zone_nr_conv) {
117 dev->zone_max_open = 0;
120 dev->need_zone_res_mgmt = dev->zone_max_active || dev->zone_max_open;
121 dev->imp_close_zone_no = dev->zone_nr_conv;
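
imp_close_zone_no is initialized to zone_nr_conv, the index of the first sequential zone: conventional zones (indices 0 to zone_nr_conv - 1) are never opened or closed, so the implicit-close round-robin scan (line 286 below) always starts past them.
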
123 for (i = 0; i < dev->zone_nr_conv; i++) {
124 zone = &dev->zones[i];
126 null_init_zone_lock(dev, zone);
128 zone->len = dev->zone_size_sects;
134 sector += dev->zone_size_sects;
137 for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
138 zone = &dev->zones[i];
140 null_init_zone_lock(dev, zone);
142 if (zone->start + dev->zone_size_sects > dev_capacity_sects)
145 zone->len = dev->zone_size_sects;
151 sector += dev->zone_size_sects;
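
The branch elided after line 142 presumably trims the last (runt) zone's len to the remaining capacity, dev_capacity_sects - zone->start, while full-size zones keep len = zone_size_sects; a zone's usable capacity is then at most zone_capacity_sects, which may be smaller than its len.
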
159 struct nullb_device *dev = nullb->dev;
165 blk_queue_chunk_sectors(q, dev->zone_size_sects);
167 blk_queue_max_zone_append_sectors(q, dev->zone_size_sects);
168 disk_set_max_open_zones(nullb->disk, dev->zone_max_open);
169 disk_set_max_active_zones(nullb->disk, dev->zone_max_active);
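
The configured limits are advertised to the block layer here and become visible in sysfs. A hedged usage sketch, assuming the standard null_blk module parameters of the same names: loading with `modprobe null_blk zoned=1 zone_size=256 zone_max_open=6 zone_max_active=8` should report 6 and 8 in /sys/block/nullb0/queue/max_open_zones and max_active_zones respectively.
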
177 void null_free_zoned_dev(struct nullb_device *dev)
179 kvfree(dev->zones);
180 dev->zones = NULL;
187 struct nullb_device *dev = nullb->dev;
193 first_zone = null_zone_no(dev, sector);
194 if (first_zone >= dev->nr_zones)
197 nr_zones = min(nr_zones, dev->nr_zones - first_zone);
201 zone = &dev->zones[first_zone];
209 null_lock_zone(dev, zone);
216 null_unlock_zone(dev, zone);
233 struct nullb_device *dev = nullb->dev;
234 struct nullb_zone *zone = &dev->zones[null_zone_no(dev, sector)];
248 static blk_status_t __null_close_zone(struct nullb_device *dev,
256 dev->nr_zones_imp_open--;
259 dev->nr_zones_exp_open--;
271 dev->nr_zones_closed++;
277 static void null_close_imp_open_zone(struct nullb_device *dev)
282 zno = dev->imp_close_zone_no;
283 if (zno >= dev->nr_zones)
284 zno = dev->zone_nr_conv;
286 for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
287 zone = &dev->zones[zno];
289 if (zno >= dev->nr_zones)
290 zno = dev->zone_nr_conv;
293 __null_close_zone(dev, zone);
294 dev->imp_close_zone_no = zno;
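
Lines 288 and 292 are elided by the match; a hedged reconstruction of the full scan (the cursor advance and the BLK_ZONE_COND_IMP_OPEN test are assumptions from context). The cursor walks the sequential zones round-robin, wrapping from nr_zones back to zone_nr_conv, closes the first implicitly open zone it finds, and records where to resume, so implicit closes are spread across zones rather than always hitting the lowest-numbered one:

    zno = dev->imp_close_zone_no;
    if (zno >= dev->nr_zones)
            zno = dev->zone_nr_conv;

    for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
            zone = &dev->zones[zno];
            zno++;                                  /* assumed: advance the cursor */
            if (zno >= dev->nr_zones)
                    zno = dev->zone_nr_conv;        /* wrap past the last zone */

            if (zone->cond == BLK_ZONE_COND_IMP_OPEN) {     /* assumed test */
                    __null_close_zone(dev, zone);
                    dev->imp_close_zone_no = zno;   /* resume here next time */
                    return;
            }
    }
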
300 static blk_status_t null_check_active(struct nullb_device *dev)
302 if (!dev->zone_max_active)
305 if (dev->nr_zones_exp_open + dev->nr_zones_imp_open +
306 dev->nr_zones_closed < dev->zone_max_active)
312 static blk_status_t null_check_open(struct nullb_device *dev)
314 if (!dev->zone_max_open)
317 if (dev->nr_zones_exp_open + dev->nr_zones_imp_open < dev->zone_max_open)
320 if (dev->nr_zones_imp_open) {
321 if (null_check_active(dev) == BLK_STS_OK) {
322 null_close_imp_open_zone(dev);
343 static blk_status_t null_check_zone_resources(struct nullb_device *dev,
350 ret = null_check_active(dev);
355 return null_check_open(dev);
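
An EMPTY zone being opened consumes both an active and an open resource, hence the two checks in sequence; a CLOSED zone already counts against the active limit, so only the open check (line 355) applies. null_check_open itself tries to make room when the open limit is hit: if some zones are only implicitly open, one of them is implicitly closed (lines 320-322), mirroring how real zoned devices manage implicit-open resources. The elided failure returns are presumably BLK_STS_ZONE_ACTIVE_RESOURCE and BLK_STS_ZONE_OPEN_RESOURCE.
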
366 struct nullb_device *dev = cmd->nq->dev;
367 unsigned int zno = null_zone_no(dev, sector);
368 struct nullb_zone *zone = &dev->zones[zno];
379 null_lock_zone(dev, zone);
397 if (dev->queue_mode == NULL_Q_MQ)
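
This appears to be the zone-append path: the data lands at the zone's current write pointer, and the resulting sector must be reported back to the caller, so it is stored in the request for the MQ case (line 397) or, presumably, in the bio for the other queue modes.
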
413 null_lock_zone_res(dev);
415 ret = null_check_zone_resources(dev, zone);
417 null_unlock_zone_res(dev);
421 dev->nr_zones_closed--;
422 dev->nr_zones_imp_open++;
424 dev->nr_zones_imp_open++;
430 null_unlock_zone_res(dev);
439 null_lock_zone_res(dev);
441 dev->nr_zones_exp_open--;
443 dev->nr_zones_imp_open--;
445 null_unlock_zone_res(dev);
451 null_unlock_zone(dev, zone);
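
The counter updates trace the zone state machine for writes: a write to a CLOSED zone implicitly re-opens it (nr_zones_closed--, nr_zones_imp_open++, lines 421-422), a write to an EMPTY zone implicitly opens it (line 424), and when the write pointer reaches the zone capacity the zone goes FULL and releases its explicit or implicit open resource (lines 441-443).
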
456 static blk_status_t null_open_zone(struct nullb_device *dev,
464 null_lock_zone_res(dev);
471 ret = null_check_zone_resources(dev, zone);
476 dev->nr_zones_imp_open--;
479 ret = null_check_zone_resources(dev, zone);
482 dev->nr_zones_closed--;
491 dev->nr_zones_exp_open++;
494 null_unlock_zone_res(dev);
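
Explicit open follows the same accounting: a CLOSED zone gives up its closed slot (line 482), an IMP_OPEN zone its implicit-open slot (line 476), and in all cases the zone ends up EXP_OPEN with nr_zones_exp_open incremented (line 491). The resource checks (lines 471 and 479) guard only the transitions that acquire a new open or active resource.
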
499 static blk_status_t null_close_zone(struct nullb_device *dev,
507 null_lock_zone_res(dev);
508 ret = __null_close_zone(dev, zone);
509 null_unlock_zone_res(dev);
514 static blk_status_t null_finish_zone(struct nullb_device *dev,
522 null_lock_zone_res(dev);
529 ret = null_check_zone_resources(dev, zone);
534 dev->nr_zones_imp_open--;
537 dev->nr_zones_exp_open--;
540 ret = null_check_zone_resources(dev, zone);
543 dev->nr_zones_closed--;
554 null_unlock_zone_res(dev);
559 static blk_status_t null_reset_zone(struct nullb_device *dev,
565 null_lock_zone_res(dev);
570 null_unlock_zone_res(dev);
573 dev->nr_zones_imp_open--;
576 dev->nr_zones_exp_open--;
579 dev->nr_zones_closed--;
584 null_unlock_zone_res(dev);
591 null_unlock_zone_res(dev);
593 if (dev->memory_backed)
594 return null_handle_discard(dev, zone->start, zone->len);
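
Reset returns a zone to EMPTY, dropping whichever open or active counter it held (lines 573-579); for memory-backed devices the backing pages are also released via null_handle_discard (line 594), so a reset actually reclaims memory, just as it reclaims capacity on a real device.
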
602 struct nullb_device *dev = cmd->nq->dev;
609 for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
610 zone = &dev->zones[i];
611 null_lock_zone(dev, zone);
615 null_reset_zone(dev, zone);
618 null_unlock_zone(dev, zone);
623 zone_no = null_zone_no(dev, sector);
624 zone = &dev->zones[zone_no];
626 null_lock_zone(dev, zone);
636 ret = null_reset_zone(dev, zone);
639 ret = null_open_zone(dev, zone);
642 ret = null_close_zone(dev, zone);
645 ret = null_finish_zone(dev, zone);
656 null_unlock_zone(dev, zone);
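
The surrounding switch dispatches the zone-management operations (REQ_OP_ZONE_RESET/OPEN/CLOSE/FINISH, lines 636-645); REQ_OP_ZONE_RESET_ALL is handled separately above by walking every sequential zone (lines 609-618), skipping the conventional zones, which cannot be reset.
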
664 struct nullb_device *dev;
680 dev = cmd->nq->dev;
681 zone = &dev->zones[null_zone_no(dev, sector)];
685 null_lock_zone(dev, zone);
687 null_unlock_zone(dev, zone);
695 static void null_set_zone_cond(struct nullb_device *dev,
702 null_lock_zone(dev, zone);
714 if (dev->memory_backed)
715 null_handle_discard(dev, zone->start, zone->len);
719 null_finish_zone(dev, zone);
724 null_unlock_zone(dev, zone);
731 ssize_t zone_cond_store(struct nullb_device *dev, const char *page,
738 if (!dev->zoned) {
743 if (!dev->zones) {
752 zone_no = null_zone_no(dev, sector);
753 if (zone_no >= dev->nr_zones) {
758 if (dev->zones[zone_no].type == BLK_ZONE_TYPE_CONVENTIONAL) {
763 null_set_zone_cond(dev, &dev->zones[zone_no], cond);
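
zone_cond_store validates that the device is zoned and initialized, that the sector falls inside a zone, and that the target is not a conventional zone before forcing the condition. In null_set_zone_cond above, forcing BLK_ZONE_COND_FULL goes through null_finish_zone (line 719), and for memory-backed devices changing the condition discards the zone's data (lines 714-715). A hedged usage sketch, assuming this backs the nullb configfs zone_readonly/zone_offline attributes: `echo 524288 > /sys/kernel/config/nullb/nullb0/zone_offline` would take the zone containing sector 524288 offline.
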