Lines Matching defs:ubd_dev

239 struct ubd *ubd_dev;
270 ubd_dev = &ubd_devs[n];
271 if(ubd_dev->file != NULL){
289 ubd_dev->no_cow = 1;
292 ubd_dev->shared = 1;
295 ubd_dev->no_trim = 1;
327 if (backing_file && ubd_dev->no_cow) {
333 ubd_dev->file = file;
334 ubd_dev->cow.file = backing_file;
335 ubd_dev->serial = serial;
336 ubd_dev->boot_openflags = flags;
510 static inline int ubd_file_size(struct ubd *ubd_dev, __u64 *size_out)
524 if (ubd_dev->file && ubd_dev->cow.file) {
525 file = ubd_dev->cow.file;
530 fd = os_open_file(ubd_dev->file, of_read(OPENFLAGS()), 0);
539 file = ubd_dev->file;
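The ubd_file_size() lines above (510-539) size the COW backing file when one is configured (lines 524-525); otherwise the image file is opened at line 530 and, in the fallback branch at line 539, sized directly. A minimal userspace sketch of that choice, using plain stat(2) instead of the UML os_* helpers; the file names are invented for the example and stand in for ubd_dev->file and ubd_dev->cow.file:

#include <stdio.h>
#include <sys/stat.h>

/* Hypothetical stand-ins for ubd_dev->file and ubd_dev->cow.file. */
static const char *image_file = "disk.cow";
static const char *backing_file = "root_fs";	/* NULL when no COW backing file is configured */

static int file_size(const char *path, unsigned long long *size_out)
{
	struct stat st;

	if (stat(path, &st) != 0)
		return -1;
	*size_out = (unsigned long long)st.st_size;
	return 0;
}

int main(void)
{
	/* Mirror the choice at lines 524-539: size the backing file when a
	 * COW file sits on top of it, otherwise size the image file itself. */
	const char *path = (image_file && backing_file) ? backing_file : image_file;
	unsigned long long size;

	if (file_size(path, &size) != 0) {
		perror("stat");
		return 1;
	}
	printf("%s: %llu bytes\n", path, size);
	return 0;
}

The sketch covers only the explicit-backing-file branch; the driver additionally reads the image's COW header to discover a backing file that was not given on the command line.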
729 static void ubd_close_dev(struct ubd *ubd_dev)
731 os_close_file(ubd_dev->fd);
732 if(ubd_dev->cow.file == NULL)
735 os_close_file(ubd_dev->cow.fd);
736 vfree(ubd_dev->cow.bitmap);
737 ubd_dev->cow.bitmap = NULL;
740 static int ubd_open_dev(struct ubd *ubd_dev)
747 ubd_dev->openflags = ubd_dev->boot_openflags;
749 create_ptr = (ubd_dev->cow.file != NULL) ? &create_cow : NULL;
750 back_ptr = ubd_dev->no_cow ? NULL : &ubd_dev->cow.file;
752 fd = open_ubd_file(ubd_dev->file, &ubd_dev->openflags, ubd_dev->shared,
753 back_ptr, &ubd_dev->cow.bitmap_offset,
754 &ubd_dev->cow.bitmap_len, &ubd_dev->cow.data_offset,
758 fd = create_cow_file(ubd_dev->file, ubd_dev->cow.file,
759 ubd_dev->openflags, SECTOR_SIZE, PAGE_SIZE,
760 &ubd_dev->cow.bitmap_offset,
761 &ubd_dev->cow.bitmap_len,
762 &ubd_dev->cow.data_offset);
765 "\"%s\"\n", ubd_dev->file, ubd_dev->cow.file);
770 printk("Failed to open '%s', errno = %d\n", ubd_dev->file,
774 ubd_dev->fd = fd;
776 if(ubd_dev->cow.file != NULL){
777 blk_queue_max_hw_sectors(ubd_dev->queue, 8 * sizeof(long));
780 ubd_dev->cow.bitmap = vmalloc(ubd_dev->cow.bitmap_len);
781 if(ubd_dev->cow.bitmap == NULL){
787 err = read_cow_bitmap(ubd_dev->fd, ubd_dev->cow.bitmap,
788 ubd_dev->cow.bitmap_offset,
789 ubd_dev->cow.bitmap_len);
793 flags = ubd_dev->openflags;
795 err = open_ubd_file(ubd_dev->cow.file, &flags, ubd_dev->shared, NULL,
798 ubd_dev->cow.fd = err;
800 if (ubd_dev->no_trim == 0) {
801 ubd_dev->queue->limits.discard_granularity = SECTOR_SIZE;
802 blk_queue_max_discard_sectors(ubd_dev->queue, UBD_MAX_REQUEST);
803 blk_queue_max_write_zeroes_sectors(ubd_dev->queue, UBD_MAX_REQUEST);
805 blk_queue_flag_set(QUEUE_FLAG_NONROT, ubd_dev->queue);
808 os_close_file(ubd_dev->fd);
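Lines 780-789 above vmalloc() a buffer of cow.bitmap_len bytes and read the COW bitmap from cow.bitmap_offset in the just-opened file. A hypothetical userspace analog using plain pread(2) rather than the UML read_cow_bitmap()/os_* wrappers; the file name, offset, and length are invented for the example (the driver takes them from the COW header):

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	const char *cow_path = "disk.cow";	/* hypothetical COW file */
	off_t bitmap_offset = 4096;		/* would come from the COW header */
	size_t bitmap_len = 8192;		/* likewise */
	unsigned char *bitmap;
	ssize_t n;
	int fd;

	fd = open(cow_path, O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	bitmap = malloc(bitmap_len);		/* the driver uses vmalloc() here */
	if (bitmap == NULL) {
		close(fd);
		return 1;
	}

	n = pread(fd, bitmap, bitmap_len, bitmap_offset);
	if (n != (ssize_t)bitmap_len)
		fprintf(stderr, "short or failed bitmap read\n");

	free(bitmap);
	close(fd);
	return 0;
}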
814 struct ubd *ubd_dev = dev_get_drvdata(dev);
816 blk_mq_free_tag_set(&ubd_dev->tag_set);
817 *ubd_dev = ((struct ubd) DEFAULT_UBD);
824 struct ubd *ubd_dev = disk->private_data;
826 if (!ubd_dev)
829 return sprintf(buf, "%s", ubd_dev->serial);
884 struct ubd *ubd_dev = &ubd_devs[n];
888 if(ubd_dev->file == NULL)
891 err = ubd_file_size(ubd_dev, &ubd_dev->size);
897 ubd_dev->size = ROUND_BLOCK(ubd_dev->size);
899 ubd_dev->tag_set.ops = &ubd_mq_ops;
900 ubd_dev->tag_set.queue_depth = 64;
901 ubd_dev->tag_set.numa_node = NUMA_NO_NODE;
902 ubd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
903 ubd_dev->tag_set.driver_data = ubd_dev;
904 ubd_dev->tag_set.nr_hw_queues = 1;
906 err = blk_mq_alloc_tag_set(&ubd_dev->tag_set);
910 disk = blk_mq_alloc_disk(&ubd_dev->tag_set, ubd_dev);
915 ubd_dev->queue = disk->queue;
917 blk_queue_write_cache(ubd_dev->queue, true, false);
918 blk_queue_max_segments(ubd_dev->queue, MAX_SG);
919 blk_queue_segment_boundary(ubd_dev->queue, PAGE_SIZE - 1);
920 err = ubd_disk_register(UBD_MAJOR, ubd_dev->size, n, disk);
930 blk_mq_free_tag_set(&ubd_dev->tag_set);
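Lines 899-930 above show the blk-mq setup order in ubd_add(): fill in the tag set, blk_mq_alloc_tag_set(), blk_mq_alloc_disk(), then the queue limits, with blk_mq_free_tag_set() on the error path. Below is a condensed, illustrative module sketch of that order, assuming a kernel of the same vintage as this listing (one that still offers blk_mq_alloc_disk(set, queuedata), blk_queue_write_cache(), and the other blk_queue_* helpers); the sketch_* names and SKETCH_MAX_SG are invented for the example and are not part of the ubd driver:

#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/err.h>

#define SKETCH_MAX_SG 64	/* stand-in for the driver's MAX_SG */

static blk_status_t sketch_queue_rq(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *bd)
{
	/* A real driver would submit bd->rq to its backend here;
	 * the sketch just completes the request immediately. */
	blk_mq_start_request(bd->rq);
	blk_mq_end_request(bd->rq, BLK_STS_OK);
	return BLK_STS_OK;
}

static const struct blk_mq_ops sketch_mq_ops = {
	.queue_rq = sketch_queue_rq,
};

static struct blk_mq_tag_set sketch_tag_set;
static struct gendisk *sketch_disk;

static int __init sketch_init(void)
{
	int err;

	/* The same tag-set fields ubd_add() fills in at lines 899-904. */
	sketch_tag_set.ops = &sketch_mq_ops;
	sketch_tag_set.queue_depth = 64;
	sketch_tag_set.numa_node = NUMA_NO_NODE;
	sketch_tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	sketch_tag_set.nr_hw_queues = 1;

	err = blk_mq_alloc_tag_set(&sketch_tag_set);
	if (err)
		return err;

	/* blk_mq_alloc_disk() returns a gendisk whose queue is already
	 * bound to the tag set, mirroring lines 910-919. */
	sketch_disk = blk_mq_alloc_disk(&sketch_tag_set, NULL);
	if (IS_ERR(sketch_disk)) {
		err = PTR_ERR(sketch_disk);
		blk_mq_free_tag_set(&sketch_tag_set);
		return err;
	}

	blk_queue_write_cache(sketch_disk->queue, true, false);
	blk_queue_max_segments(sketch_disk->queue, SKETCH_MAX_SG);
	blk_queue_segment_boundary(sketch_disk->queue, PAGE_SIZE - 1);

	/* A real driver would set the capacity and call add_disk() next. */
	return 0;
}

static void __exit sketch_exit(void)
{
	put_disk(sketch_disk);		/* blk_cleanup_disk() on older kernels */
	blk_mq_free_tag_set(&sketch_tag_set);
}

module_init(sketch_init);
module_exit(sketch_exit);
MODULE_LICENSE("GPL");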
974 struct ubd *ubd_dev;
983 ubd_dev = &ubd_devs[n];
986 if(ubd_dev->file == NULL){
991 CONFIG_CHUNK(str, size, len, ubd_dev->file, 0);
993 if(ubd_dev->cow.file != NULL){
995 CONFIG_CHUNK(str, size, len, ubd_dev->cow.file, 1);
1017 struct ubd *ubd_dev;
1022 ubd_dev = &ubd_devs[n];
1024 if(ubd_dev->file == NULL)
1029 if(ubd_dev->count > 0)
1039 platform_device_unregister(&ubd_dev->pdev);
1067 struct ubd *ubd_dev = &ubd_devs[0];
1070 if(ubd_dev->file == NULL)
1071 ubd_dev->file = "root_fs";
1159 struct ubd *ubd_dev = disk->private_data;
1163 if(ubd_dev->count == 0){
1164 err = ubd_open_dev(ubd_dev);
1167 disk->disk_name, ubd_dev->file, -err);
1171 ubd_dev->count++;
1172 set_disk_ro(disk, !ubd_dev->openflags.w);
1180 struct ubd *ubd_dev = disk->private_data;
1183 if(--ubd_dev->count == 0)
1184 ubd_close_dev(ubd_dev);
1349 struct ubd *ubd_dev = hctx->queue->queuedata;
1355 spin_lock_irq(&ubd_dev->lock);
1363 ret = ubd_submit_request(ubd_dev, req);
1370 spin_unlock_irq(&ubd_dev->lock);
1384 struct ubd *ubd_dev = bdev->bd_disk->private_data;
1388 geo->cylinders = ubd_dev->size / (128 * 32 * 512);
1395 struct ubd *ubd_dev = bdev->bd_disk->private_data;
1402 ubd_id[ATA_ID_CYLS] = ubd_dev->size / (128 * 32 * 512);
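Lines 1388 and 1402 both derive a cylinder count from the device size with the divisor 128 * 32 * 512, i.e. a fake geometry of 128 heads and 32 sectors per track with 512-byte sectors (the head and sector counts are implied by the divisor rather than shown in this listing). A small standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
	/* Hypothetical device size; the driver uses ubd_dev->size here. */
	unsigned long long size = 1ULL << 30;			/* 1 GiB image */
	unsigned long long bytes_per_cyl = 128 * 32 * 512;	/* heads * sectors/track * 512 */
	unsigned long long cylinders = size / bytes_per_cyl;

	printf("%llu-byte device -> %llu cylinders of %llu bytes\n",
	       size, cylinders, bytes_per_cyl);
	return 0;
}

For a 1 GiB image this prints 512 cylinders, since each fake cylinder covers 2 MiB.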