Lines matching refs: brd, from drivers/block/brd.c (the Linux RAM-backed block device driver); each entry keeps its line number in the source file.

33  * the pages containing the block device's contents. A brd page's ->index is
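The data structure behind this comment, as a minimal sketch assuming the v5.x-era field names that recur throughout this listing (brd_lock, brd_pages, brd_queue); here and in the sketches below, kernel context and that era's radix-tree API are assumed, and none of the code is the verbatim source:

    struct brd_device {
        int                     brd_number;   /* device index (ram<N>) */
        struct request_queue    *brd_queue;
        struct gendisk          *brd_disk;
        struct list_head        brd_list;     /* link on the global brd_devices list */

        /*
         * Backing store: a radix tree of pages keyed by page index
         * (device offset in PAGE_SIZE units); brd_lock serializes inserts.
         */
        spinlock_t              brd_lock;
        struct radix_tree_root  brd_pages;
    };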
54 * Look up and return a brd's page for a given sector.
56 static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
63 * device node -- brd pages will never be deleted under us, so we
74 page = radix_tree_lookup(&brd->brd_pages, idx);
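Roughly how the lookup works, per the lines above: the 512-byte sector number is shifted down to a page index and looked up under RCU (a sketch; PAGE_SECTORS_SHIFT is PAGE_SHIFT - SECTOR_SHIFT in this era's headers):

    static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
    {
        pgoff_t idx;
        struct page *page;

        /*
         * Pages are never deleted while the device node is open, so an
         * RCU-protected lookup with no refcounting is enough.
         */
        rcu_read_lock();
        idx = sector >> PAGE_SECTORS_SHIFT;   /* sector -> page index */
        page = radix_tree_lookup(&brd->brd_pages, idx);
        rcu_read_unlock();

        return page;   /* NULL means this page was never written */
    }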
85 static int brd_insert_page(struct brd_device *brd, sector_t sector)
91 page = brd_lookup_page(brd, sector);
109 spin_lock(&brd->brd_lock);
112 if (radix_tree_insert(&brd->brd_pages, idx, page)) {
114 page = radix_tree_lookup(&brd->brd_pages, idx);
118 spin_unlock(&brd->brd_lock);
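A sketch of the insert path matching the int-returning signature above: allocate a zeroed page outside the lock, preload the radix tree, then insert under brd_lock; if another writer raced us, the duplicate insert fails and the winner's page is kept:

    static int brd_insert_page(struct brd_device *brd, sector_t sector)
    {
        pgoff_t idx = sector >> PAGE_SECTORS_SHIFT;
        struct page *page;

        if (brd_lookup_page(brd, sector))
            return 0;                           /* already populated */

        /* GFP_NOIO: this can be called while writing dirty data out */
        page = alloc_page(GFP_NOIO | __GFP_ZERO | __GFP_HIGHMEM);
        if (!page)
            return -ENOMEM;
        if (radix_tree_preload(GFP_NOIO)) {
            __free_page(page);
            return -ENOMEM;
        }

        spin_lock(&brd->brd_lock);
        page->index = idx;
        if (radix_tree_insert(&brd->brd_pages, idx, page))
            __free_page(page);   /* lost the race; the existing page stays */
        spin_unlock(&brd->brd_lock);
        radix_tree_preload_end();
        return 0;
    }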
129 static void brd_free_pages(struct brd_device *brd)
138 nr_pages = radix_tree_gang_lookup(&brd->brd_pages,
146 ret = radix_tree_delete(&brd->brd_pages, pos);
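The teardown loop gang-looks-up pages in small batches, deletes each from the tree, and frees it; a sketch assuming a small batch constant, called FREE_BATCH here:

    #define FREE_BATCH 16
    static void brd_free_pages(struct brd_device *brd)
    {
        unsigned long pos = 0;
        struct page *pages[FREE_BATCH];
        int nr_pages;

        do {
            int i;

            nr_pages = radix_tree_gang_lookup(&brd->brd_pages,
                            (void **)pages, pos, FREE_BATCH);
            for (i = 0; i < nr_pages; i++) {
                pos = pages[i]->index;
                radix_tree_delete(&brd->brd_pages, pos);
                __free_page(pages[i]);
            }
            pos++;   /* resume after the last index we freed */
        } while (nr_pages == FREE_BATCH);   /* a short batch means the tree is drained */
    }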
170 static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
177 ret = brd_insert_page(brd, sector);
182 ret = brd_insert_page(brd, sector);
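Why brd_insert_page is called twice: a copy of n bytes starting mid-page can span a page boundary, so the setup step pre-allocates at most two backing pages while sleeping is still allowed. A sketch:

    static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
    {
        unsigned int offset = (sector & (PAGE_SECTORS - 1)) << SECTOR_SHIFT;
        size_t copy = min_t(size_t, n, PAGE_SIZE - offset);
        int ret;

        ret = brd_insert_page(brd, sector);   /* first (possibly partial) page */
        if (ret)
            return ret;
        if (copy < n) {                       /* copy spills into the next page */
            sector += copy >> SECTOR_SHIFT;
            ret = brd_insert_page(brd, sector);
        }
        return ret;
    }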
188 * Copy n bytes from src to the brd starting at sector. Does not sleep.
190 static void copy_to_brd(struct brd_device *brd, const void *src,
199 page = brd_lookup_page(brd, sector);
210 page = brd_lookup_page(brd, sector);
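The copy itself must not sleep (hence the setup step above): it maps each backing page with kmap_atomic and memcpys at most two segments. A sketch:

    static void copy_to_brd(struct brd_device *brd, const void *src,
                            sector_t sector, size_t n)
    {
        unsigned int offset = (sector & (PAGE_SECTORS - 1)) << SECTOR_SHIFT;
        size_t copy = min_t(size_t, n, PAGE_SIZE - offset);
        struct page *page = brd_lookup_page(brd, sector);
        void *dst;

        dst = kmap_atomic(page);   /* page exists: copy_to_brd_setup inserted it */
        memcpy(dst + offset, src, copy);
        kunmap_atomic(dst);

        if (copy < n) {            /* second segment: next page, offset 0 */
            src += copy;
            sector += copy >> SECTOR_SHIFT;
            copy = n - copy;
            page = brd_lookup_page(brd, sector);
            dst = kmap_atomic(page);
            memcpy(dst, src, copy);
            kunmap_atomic(dst);
        }
    }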
220 * Copy n bytes to dst from the brd starting at sector. Does not sleep.
222 static void copy_from_brd(void *dst, struct brd_device *brd,
231 page = brd_lookup_page(brd, sector);
243 page = brd_lookup_page(brd, sector);
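Reads mirror the write path with one difference: a missing page has never been written, so the destination is zero-filled instead, which is what gives a fresh ramdisk its all-zero contents. A sketch:

    static void copy_from_brd(void *dst, struct brd_device *brd,
                              sector_t sector, size_t n)
    {
        unsigned int offset = (sector & (PAGE_SECTORS - 1)) << SECTOR_SHIFT;
        size_t copy = min_t(size_t, n, PAGE_SIZE - offset);
        struct page *page = brd_lookup_page(brd, sector);
        void *src;

        if (page) {
            src = kmap_atomic(page);
            memcpy(dst, src + offset, copy);
            kunmap_atomic(src);
        } else {
            memset(dst, 0, copy);   /* unwritten sectors read as zeros */
        }
        if (copy < n) {             /* cross-page remainder, same pattern */
            dst += copy;
            sector += copy >> SECTOR_SHIFT;
            copy = n - copy;
            page = brd_lookup_page(brd, sector);
            if (page) {
                src = kmap_atomic(page);
                memcpy(dst, src, copy);
                kunmap_atomic(src);
            } else {
                memset(dst, 0, copy);
            }
        }
    }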
256 static int brd_do_bvec(struct brd_device *brd, struct page *page,
264 err = copy_to_brd_setup(brd, sector, len);
271 copy_from_brd(mem + off, brd, sector, len);
275 copy_to_brd(brd, mem + off, sector, len);
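brd_do_bvec ties the pieces together for one buffer segment: writes pre-allocate backing pages first (the only step that may sleep or fail), then either direction runs as an atomic copy. A sketch matching the call sites above:

    static int brd_do_bvec(struct brd_device *brd, struct page *page,
                           unsigned int len, unsigned int off, unsigned int op,
                           sector_t sector)
    {
        void *mem;
        int err = 0;

        if (op_is_write(op)) {
            err = copy_to_brd_setup(brd, sector, len);  /* may return -ENOMEM */
            if (err)
                goto out;
        }

        mem = kmap_atomic(page);
        if (!op_is_write(op)) {
            copy_from_brd(mem + off, brd, sector, len);
            flush_dcache_page(page);
        } else {
            flush_dcache_page(page);
            copy_to_brd(brd, mem + off, sector, len);
        }
        kunmap_atomic(mem);
    out:
        return err;
    }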
285 struct brd_device *brd = bio->bi_disk->private_data;
302 err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
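The bio entry point (bio->bi_disk dates this listing to roughly the v5.8..v5.11 block layer, where the function is named brd_submit_bio or brd_make_request depending on the exact version) walks each segment and feeds it to brd_do_bvec, advancing the sector cursor as it goes. A sketch:

    static blk_qc_t brd_submit_bio(struct bio *bio)
    {
        struct brd_device *brd = bio->bi_disk->private_data;
        sector_t sector = bio->bi_iter.bi_sector;
        struct bio_vec bvec;
        struct bvec_iter iter;

        bio_for_each_segment(bvec, bio, iter) {
            unsigned int len = bvec.bv_len;
            int err;

            err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
                              bio_op(bio), sector);
            if (err) {
                bio_io_error(bio);
                return BLK_QC_T_NONE;
            }
            sector += len >> SECTOR_SHIFT;
        }
        bio_endio(bio);
        return BLK_QC_T_NONE;
    }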
319 struct brd_device *brd = bdev->bd_disk->private_data;
324 err = brd_do_bvec(brd, page, PAGE_SIZE, 0, op, sector);
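The rw_page block_device_operations hook serves single-page synchronous I/O (swap, for instance) without building a bio, completing the page itself via page_endio. A sketch:

    static int brd_rw_page(struct block_device *bdev, sector_t sector,
                           struct page *page, unsigned int op)
    {
        struct brd_device *brd = bdev->bd_disk->private_data;
        int err;

        err = brd_do_bvec(brd, page, PAGE_SIZE, 0, op, sector);
        page_endio(page, op_is_write(op), err);
        return err;
    }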
340 MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");
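rd_nr is one of three module parameters this driver traditionally exposes; a sketch of how the declaration pairs with the description string above, with rd_size and max_part shown for context (types and defaults as in the v5.x driver):

    static int rd_nr = CONFIG_BLK_DEV_RAM_COUNT;
    module_param(rd_nr, int, 0444);
    MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");

    static unsigned long rd_size = CONFIG_BLK_DEV_RAM_SIZE;
    module_param(rd_size, ulong, 0444);
    MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");

    static int max_part = 1;
    module_param(max_part, int, 0444);
    MODULE_PARM_DESC(max_part, "Num Minors to reserve between devices");

Loading with, say, modprobe brd rd_nr=2 rd_size=65536 would pre-create /dev/ram0 and /dev/ram1 at 64 MiB each.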
373 struct brd_device *brd;
376 brd = kzalloc(sizeof(*brd), GFP_KERNEL);
377 if (!brd)
379 brd->brd_number = i;
380 spin_lock_init(&brd->brd_lock);
381 INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);
383 brd->brd_queue = blk_alloc_queue(NUMA_NO_NODE);
384 if (!brd->brd_queue)
393 blk_queue_physical_block_size(brd->brd_queue, PAGE_SIZE);
394 disk = brd->brd_disk = alloc_disk(max_part);
400 disk->private_data = brd;
406 blk_queue_flag_set(QUEUE_FLAG_NONROT, brd->brd_queue);
407 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, brd->brd_queue);
409 return brd;
412 blk_cleanup_queue(brd->brd_queue);
414 kfree(brd);
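Assembled from the lines above, the allocation path in skeleton form; the unwind labels are my naming, and capacity is set in 512-byte sectors, hence rd_size * 2 for a size given in KiB:

    static struct brd_device *brd_alloc(int i)
    {
        struct brd_device *brd;
        struct gendisk *disk;

        brd = kzalloc(sizeof(*brd), GFP_KERNEL);
        if (!brd)
            goto out;
        brd->brd_number = i;
        spin_lock_init(&brd->brd_lock);
        INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);

        brd->brd_queue = blk_alloc_queue(NUMA_NO_NODE);
        if (!brd->brd_queue)
            goto out_free_dev;

        /* Align partitions on 4k for the direct-access (PFN) path. */
        blk_queue_physical_block_size(brd->brd_queue, PAGE_SIZE);

        disk = brd->brd_disk = alloc_disk(max_part);
        if (!disk)
            goto out_free_queue;
        disk->major        = RAMDISK_MAJOR;
        disk->first_minor  = i * max_part;
        disk->fops         = &brd_fops;
        disk->private_data = brd;
        sprintf(disk->disk_name, "ram%d", i);
        set_capacity(disk, rd_size * 2);   /* rd_size is KiB, capacity is sectors */

        /* RAM-backed: not rotational, contributes no entropy */
        blk_queue_flag_set(QUEUE_FLAG_NONROT, brd->brd_queue);
        blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, brd->brd_queue);
        return brd;

    out_free_queue:
        blk_cleanup_queue(brd->brd_queue);
    out_free_dev:
        kfree(brd);
    out:
        return NULL;
    }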
419 static void brd_free(struct brd_device *brd)
421 put_disk(brd->brd_disk);
422 blk_cleanup_queue(brd->brd_queue);
423 brd_free_pages(brd);
424 kfree(brd);
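Teardown runs in the reverse order of brd_alloc, with the page store freed only after the disk and queue are gone so no I/O can still reach it:

    static void brd_free(struct brd_device *brd)
    {
        put_disk(brd->brd_disk);
        blk_cleanup_queue(brd->brd_queue);
        brd_free_pages(brd);   /* last: nothing can submit I/O any more */
        kfree(brd);
    }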
429 struct brd_device *brd;
432 list_for_each_entry(brd, &brd_devices, brd_list) {
433 if (brd->brd_number == i)
437 brd = brd_alloc(i);
438 if (brd) {
439 brd->brd_disk->queue = brd->brd_queue;
440 add_disk(brd->brd_disk);
441 list_add_tail(&brd->brd_list, &brd_devices);
445 return brd;
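brd_init_one is find-or-create: scan the global list for an existing device with this number, otherwise allocate one, attach its queue, and publish it. A sketch, assuming the caller holds brd_devices_mutex and the bool *new out-parameter of this era:

    static struct brd_device *brd_init_one(int i, bool *new)
    {
        struct brd_device *brd;

        *new = false;
        list_for_each_entry(brd, &brd_devices, brd_list) {
            if (brd->brd_number == i)
                goto out;                  /* already instantiated */
        }

        brd = brd_alloc(i);
        if (brd) {
            brd->brd_disk->queue = brd->brd_queue;
            add_disk(brd->brd_disk);       /* visible to userspace from here on */
            list_add_tail(&brd->brd_list, &brd_devices);
        }
        *new = true;
    out:
        return brd;
    }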
448 static void brd_del_one(struct brd_device *brd)
450 list_del(&brd->brd_list);
451 del_gendisk(brd->brd_disk);
452 brd_free(brd);
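Deletion unlinks the bookkeeping entry and removes the gendisk before freeing anything, so new opens cannot race the teardown:

    static void brd_del_one(struct brd_device *brd)
    {
        list_del(&brd->brd_list);
        del_gendisk(brd->brd_disk);   /* stop new opens before freeing */
        brd_free(brd);
    }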
457 struct brd_device *brd;
462 brd = brd_init_one(MINOR(dev) / max_part, &new);
463 kobj = brd ? get_disk_and_module(brd->brd_disk) : NULL;
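brd_probe is the on-demand instantiation hook registered for the ramdisk dev_t region: opening an unused /dev/ramN node maps the minor back to a device index and creates the device on the fly. A sketch:

    static struct kobject *brd_probe(dev_t dev, int *part, void *data)
    {
        struct brd_device *brd;
        struct kobject *kobj;
        bool new;

        mutex_lock(&brd_devices_mutex);
        brd = brd_init_one(MINOR(dev) / max_part, &new);
        kobj = brd ? get_disk_and_module(brd->brd_disk) : NULL;
        mutex_unlock(&brd_devices_mutex);

        if (new)
            *part = 0;   /* freshly created: open the whole disk */
        return kobj;
    }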
485 pr_info("brd: max_part can't be larger than %d, reset max_part = %d.\n",
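This message comes from the max_part sanitization done before any device is created: max_part must divide the minor space evenly and stay within DISK_MAX_PARTS. A sketch of the check, assuming a helper shaped like the v5.x brd_check_and_reset_par:

    static inline void brd_check_and_reset_par(void)
    {
        if (unlikely(!max_part))
            max_part = 1;

        /*
         * Round up to a power of two so (1U << MINORBITS) is divisible by
         * max_part; otherwise two devices could collide on a dev_t.
         */
        if ((1U << MINORBITS) % max_part != 0)
            max_part = 1UL << fls(max_part);

        if (max_part > DISK_MAX_PARTS) {
            pr_info("brd: max_part can't be larger than %d, reset max_part = %d.\n",
                    DISK_MAX_PARTS, DISK_MAX_PARTS);
            max_part = DISK_MAX_PARTS;
        }
    }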
493 struct brd_device *brd, *next;
497 * brd module now has a feature to instantiate underlying device
502 * (2) User can further extend brd devices by creating dev nodes themselves
517 brd = brd_alloc(i);
518 if (!brd)
520 list_add_tail(&brd->brd_list, &brd_devices);
525 list_for_each_entry(brd, &brd_devices, brd_list) {
530 brd->brd_disk->queue = brd->brd_queue;
531 add_disk(brd->brd_disk);
537 pr_info("brd: module loaded\n");
541 list_for_each_entry_safe(brd, next, &brd_devices, brd_list) {
542 list_del(&brd->brd_list);
543 brd_free(brd);
547 pr_info("brd: module NOT loaded !!!\n");
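Pieced together from the lines above, module init in skeleton form: pre-create rd_nr devices, register the probe region for on-demand ones, and on any allocation failure tear down everything already on the list (the error label is my naming):

    static int __init brd_init(void)
    {
        struct brd_device *brd, *next;
        int i;

        if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
            return -EIO;

        brd_check_and_reset_par();   /* sanitize max_part, see above */

        for (i = 0; i < rd_nr; i++) {
            brd = brd_alloc(i);
            if (!brd)
                goto out_free;
            list_add_tail(&brd->brd_list, &brd_devices);
        }

        /* point of no return: attach queues and publish the disks */
        list_for_each_entry(brd, &brd_devices, brd_list) {
            brd->brd_disk->queue = brd->brd_queue;
            add_disk(brd->brd_disk);
        }

        blk_register_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS,
                            THIS_MODULE, brd_probe, NULL, NULL);

        pr_info("brd: module loaded\n");
        return 0;

    out_free:
        list_for_each_entry_safe(brd, next, &brd_devices, brd_list) {
            list_del(&brd->brd_list);
            brd_free(brd);
        }
        unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
        pr_info("brd: module NOT loaded !!!\n");
        return -ENOMEM;
    }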
553 struct brd_device *brd, *next;
555 list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
556 brd_del_one(brd);
561 pr_info("brd: module unloaded\n");
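Exit is the mirror image: delete every instantiated device, then drop the probe region and the major number. A sketch:

    static void __exit brd_exit(void)
    {
        struct brd_device *brd, *next;

        list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
            brd_del_one(brd);

        blk_unregister_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS);
        unregister_blkdev(RAMDISK_MAJOR, "ramdisk");

        pr_info("brd: module unloaded\n");
    }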