Lines matching refs: disk (drivers/block/nbd.c)

113 struct gendisk *disk;
147 #define nbd_name(nbd) ((nbd)->disk->disk_name)
167 return disk_to_dev(nbd->disk);
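The first three hits show how everything else in this listing reaches the device: a gendisk pointer inside struct nbd_device, a name macro, and a helper that turns the disk into a struct device for logging. A minimal sketch of that pattern follows; the struct layout beyond the fields shown, and the helper name, are assumptions.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/genhd.h>

/* Sketch only: the real struct nbd_device carries many more fields. */
struct nbd_device {
	struct gendisk *disk;		/* line 113 */
	struct blk_mq_tag_set tag_set;	/* assumed; referenced by later sketches */
	/* ... */
};

/* Line 147: reuse the gendisk name for log messages. */
#define nbd_name(nbd) ((nbd)->disk->disk_name)

/* Line 167: every dev_err()/dev_warn() call in this listing goes through here. */
static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
	return disk_to_dev(nbd->disk);
}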
214 struct gendisk *disk = dev_to_disk(dev);
215 struct nbd_device *nbd = (struct nbd_device *)disk->private_data;
227 struct gendisk *disk = nbd->disk;
230 if (disk) {
231 q = disk->queue;
232 del_gendisk(disk);
235 disk->private_data = NULL;
236 put_disk(disk);
241 * make sure that the disk and the related kobject are also
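Lines 227-241 are the teardown hits: unlink the gendisk, clear its back-pointer, and drop the final reference so the disk and its kobject go away. A sketch of that ordering follows; the function name and the cleanup call between the listed lines are assumptions.

static void nbd_dev_remove_sketch(struct nbd_device *nbd)
{
	struct gendisk *disk = nbd->disk;
	struct request_queue *q;

	if (disk) {				/* line 230 */
		q = disk->queue;		/* line 231 */
		del_gendisk(disk);		/* line 232: unlink from userspace */
		blk_cleanup_queue(q);		/* assumed: queue cleanup between lines 232 and 235 */
		disk->private_data = NULL;	/* line 235: drop the back-pointer */
		put_disk(disk);			/* line 236: final gendisk reference */
	}
}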
299 set_capacity(nbd->disk, 0);
307 struct block_device *bdev = bdget_disk(nbd->disk, 0);
311 nbd->disk->queue->limits.discard_granularity = config->blksize;
312 nbd->disk->queue->limits.discard_alignment = config->blksize;
313 blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
315 blk_queue_logical_block_size(nbd->disk->queue, config->blksize);
316 blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
317 set_capacity(nbd->disk, nr_sectors);
324 set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
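Lines 299-324 belong to the size path: push the negotiated block size into the queue limits, set the capacity in 512-byte sectors, and request a partition rescan on the next open. The sketch below mirrors those calls; the function signature and the sector computation are assumptions.

static void nbd_set_size_sketch(struct nbd_device *nbd,
				loff_t blksize, loff_t bytesize)
{
	sector_t nr_sectors = bytesize >> 9;	/* gendisk capacity is in 512-byte sectors */

	nbd->disk->queue->limits.discard_granularity = blksize;	/* line 311 */
	nbd->disk->queue->limits.discard_alignment = blksize;		/* line 312 */
	blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);	/* line 313 */

	blk_queue_logical_block_size(nbd->disk->queue, blksize);	/* line 315 */
	blk_queue_physical_block_size(nbd->disk->queue, blksize);	/* line 316 */

	set_capacity(nbd->disk, nr_sectors);				/* line 317 */
	set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);			/* line 324 */
}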
369 dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
495 dev_err_ratelimited(disk_to_dev(nbd->disk),
565 dev_err_ratelimited(disk_to_dev(nbd->disk),
623 dev_err_ratelimited(disk_to_dev(nbd->disk),
664 dev_err(disk_to_dev(nbd->disk),
699 dev_err(disk_to_dev(nbd->disk),
705 dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
732 dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
741 dev_err(disk_to_dev(nbd->disk), "Suspicious reply %d (status %u flags %lu)",
747 dev_err(disk_to_dev(nbd->disk), "Unexpected reply %d from different sock %d (expected %d)",
751 dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
757 dev_err(disk_to_dev(nbd->disk), "Command already handled %p\n",
763 dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
769 dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
785 dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
817 struct request_queue *q = nbd->disk->queue;
835 dev_err(disk_to_dev(nbd->disk), "%s: no io inflight\n",
885 blk_mq_quiesce_queue(nbd->disk->queue);
887 blk_mq_unquiesce_queue(nbd->disk->queue);
888 dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
902 dev_err_ratelimited(disk_to_dev(nbd->disk),
925 dev_err_ratelimited(disk_to_dev(nbd->disk),
959 dev_err_ratelimited(disk_to_dev(nbd->disk),
966 dev_err_ratelimited(disk_to_dev(nbd->disk),
1020 dev_err_ratelimited(disk_to_dev(nbd->disk),
1076 dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
1102 blk_mq_freeze_queue(nbd->disk->queue);
1111 dev_err(disk_to_dev(nbd->disk),
1142 blk_mq_unfreeze_queue(nbd->disk->queue);
1147 blk_mq_unfreeze_queue(nbd->disk->queue);
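Two blk-mq gating patterns appear in this stretch: quiesce/unquiesce (lines 885-888) to keep ->queue_rq() from running while in-flight commands are cancelled, and freeze/unfreeze (lines 1102-1147) to drain outstanding requests around a socket reconfiguration. Both are sketched below; the work elided between the calls is only indicated by comments.

static void nbd_clear_que_sketch(struct nbd_device *nbd)
{
	blk_mq_quiesce_queue(nbd->disk->queue);			/* line 885 */
	/* ...cancel every in-flight command here (assumed)... */
	blk_mq_unquiesce_queue(nbd->disk->queue);		/* line 887 */
	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");	/* line 888 */
}

static int nbd_reconfigure_sketch(struct nbd_device *nbd)
{
	int err = 0;

	blk_mq_freeze_queue(nbd->disk->queue);			/* line 1102: drain I/O */
	/* ...swap or add sockets here (assumed)... */
	blk_mq_unfreeze_queue(nbd->disk->queue);		/* lines 1142/1147: resume I/O */
	return err;
}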
1224 set_disk_ro(nbd->disk, true);
1226 set_disk_ro(nbd->disk, false);
1228 blk_queue_flag_set(QUEUE_FLAG_DISCARD, nbd->disk->queue);
1231 blk_queue_write_cache(nbd->disk->queue, true, true);
1233 blk_queue_write_cache(nbd->disk->queue, true, false);
1236 blk_queue_write_cache(nbd->disk->queue, false, false);
1257 dev_err(disk_to_dev(nbd->disk),
1267 dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
1290 device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
1309 nbd->disk->queue->limits.discard_granularity = 0;
1310 nbd->disk->queue->limits.discard_alignment = 0;
1311 blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
1312 blk_queue_flag_clear(QUEUE_FLAG_DISCARD, nbd->disk->queue);
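Lines 1224-1236 translate the server's transmission flags into block-queue features (read-only, discard, write cache, FUA), and lines 1309-1312 undo the discard setup when the configuration is dropped. A sketch of the flag mapping, assuming the standard NBD_FLAG_* names from <linux/nbd.h>:

static void nbd_parse_flags_sketch(struct nbd_device *nbd, u64 flags)
{
	if (flags & NBD_FLAG_READ_ONLY)
		set_disk_ro(nbd->disk, true);				/* line 1224 */
	else
		set_disk_ro(nbd->disk, false);				/* line 1226 */

	if (flags & NBD_FLAG_SEND_TRIM)
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, nbd->disk->queue); /* line 1228 */

	if (flags & NBD_FLAG_SEND_FLUSH) {
		if (flags & NBD_FLAG_SEND_FUA)
			blk_queue_write_cache(nbd->disk->queue, true, true);	/* line 1231 */
		else
			blk_queue_write_cache(nbd->disk->queue, true, false);	/* line 1233 */
	} else {
		blk_queue_write_cache(nbd->disk->queue, false, false);		/* line 1236 */
	}
}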
1332 dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
1340 dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n");
1349 error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
1351 dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
1400 set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
1443 blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
1445 blk_queue_rq_timeout(nbd->disk->queue, 30 * HZ);
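Lines 1443-1445 set the request timeout: a caller-supplied value (in seconds) wins, otherwise a 30-second default applies. A one-function sketch, name assumed:

static void nbd_set_cmd_timeout_sketch(struct nbd_device *nbd, u64 timeout)
{
	if (timeout)
		blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);	/* line 1443 */
	else
		blk_queue_rq_timeout(nbd->disk->queue, 30 * HZ);	/* line 1445 */
}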
1598 static void nbd_release(struct gendisk *disk, fmode_t mode)
1600 struct nbd_device *nbd = disk->private_data;
1601 struct block_device *bdev = bdget_disk(disk, 0);
1770 struct gendisk *disk;
1778 disk = alloc_disk(1 << part_shift);
1779 if (!disk)
1797 nbd->disk = disk;
1817 disk->queue = q;
1822 blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
1823 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
1824 disk->queue->limits.discard_granularity = 0;
1825 disk->queue->limits.discard_alignment = 0;
1826 blk_queue_max_discard_sectors(disk->queue, 0);
1827 blk_queue_max_segment_size(disk->queue, UINT_MAX);
1828 blk_queue_max_segments(disk->queue, USHRT_MAX);
1829 blk_queue_max_hw_sectors(disk->queue, 65536);
1830 disk->queue->limits.max_sectors = 256;
1836 disk->major = NBD_MAJOR;
1837 disk->first_minor = index << part_shift;
1838 disk->fops = &nbd_fops;
1839 disk->private_data = nbd;
1840 sprintf(disk->disk_name, "nbd%d", index);
1841 add_disk(disk);
1850 put_disk(disk);
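The largest cluster (lines 1770-1850) is device creation: allocate the gendisk, attach a blk-mq queue, apply conservative limits, fill in major/minor/fops/name, and register the disk; the put_disk() at line 1850 is the error-path release. The sketch below follows the same pre-5.15 alloc_disk() style the listing uses; the tag-set setup, nbd_fops, and full error handling are abbreviated or assumed.

static int nbd_dev_add_sketch(struct nbd_device *nbd, int index, int part_shift)
{
	struct gendisk *disk;
	struct request_queue *q;

	disk = alloc_disk(1 << part_shift);	/* line 1778: minors reserved for partitions */
	if (!disk)
		return -ENOMEM;
	nbd->disk = disk;			/* line 1797 */

	q = blk_mq_init_queue(&nbd->tag_set);	/* tag set assumed to be initialised already */
	if (IS_ERR(q)) {
		put_disk(disk);			/* line 1850: error-path release */
		return PTR_ERR(q);
	}
	disk->queue = q;			/* line 1817 */

	/* Network-backed device: not rotational, no entropy contribution. */
	blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);		/* line 1822 */
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);	/* line 1823 */
	disk->queue->limits.discard_granularity = 0;
	disk->queue->limits.discard_alignment = 0;
	blk_queue_max_discard_sectors(disk->queue, 0);
	blk_queue_max_segment_size(disk->queue, UINT_MAX);
	blk_queue_max_segments(disk->queue, USHRT_MAX);
	blk_queue_max_hw_sectors(disk->queue, 65536);
	disk->queue->limits.max_sectors = 256;

	disk->major = NBD_MAJOR;		/* 43, from <linux/major.h> */
	disk->first_minor = index << part_shift;
	disk->fops = &nbd_fops;			/* block_device_operations, assumed defined elsewhere */
	disk->private_data = nbd;
	sprintf(disk->disk_name, "nbd%d", index);
	add_disk(disk);				/* line 1841: visible to userspace from here on */
	return 0;
}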
2512 * for the whole disk.