/*
 *  linux/fs/nfs/blocklayout/blocklayout.c
 *
 *  Module for the NFSv4.1 pNFS block layout driver.
 *
 *  Copyright (c) 2006 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson <andros@citi.umich.edu>
 *  Fred Isaman <iisaman@umich.edu>
 *
 * permission is granted to use, copy, create derivative works and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the university of michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization.  if
 * the above copyright notice or any other identification of the
 * university of michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * this software is provided as is, without representation from the
 * university of michigan as to its fitness for any purpose, and without
 * warranty by the university of michigan of any kind, either express
 * or implied, including without limitation the implied warranties of
 * merchantability and fitness for a particular purpose.  the regents
 * of the university of michigan shall not be liable for any damages,
 * including special, indirect, incidental, or consequential damages,
 * with respect to any claim arising out of or in connection with the use
 * of the software, even if it has been or is hereafter advised of the
 * possibility of such damages.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/bio.h>		/* struct bio */
#include <linux/prefetch.h>
#include <linux/pagevec.h>

#include "../pnfs.h"
#include "../nfs4session.h"
#include "../internal.h"
#include "blocklayout.h"

#define NFSDBG_FACILITY	NFSDBG_PNFS_LD

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andy Adamson <andros@citi.umich.edu>");
MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver");

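/*
 * An extent reads as a hole when it has no data on the volume:
 * NONE_DATA extents always do, while INVALID_DATA extents only do
 * until they have been written (be_tag is set once the range has
 * been marked written in the extent tree).
 */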
static bool is_hole(struct pnfs_block_extent *be)
{
	switch (be->be_state) {
	case PNFS_BLOCK_NONE_DATA:
		return true;
	case PNFS_BLOCK_INVALID_DATA:
		return be->be_tag ? false : true;
	default:
		return false;
	}
}

/* The data we are handed might be spread across several bios.  We need
 * to track when the last one is finished.
 */
struct parallel_io {
	struct kref refcnt;
	void (*pnfs_callback) (void *data);
	void *data;
};
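
/*
 * Reference counting sketch: alloc_parallel() takes the initial
 * reference, bl_submit_bio() takes one more per in-flight bio, and
 * every bio completion handler drops one via put_parallel().  The
 * submission path drops the initial reference once it has finished
 * generating bios, so pnfs_callback fires exactly once, after the
 * last bio completes.
 */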

static inline struct parallel_io *alloc_parallel(void *data)
{
	struct parallel_io *rv;

	rv = kmalloc(sizeof(*rv), GFP_NOFS);
	if (rv) {
		rv->data = data;
		kref_init(&rv->refcnt);
	}
	return rv;
}

static inline void get_parallel(struct parallel_io *p)
{
	kref_get(&p->refcnt);
}

static void destroy_parallel(struct kref *kref)
{
	struct parallel_io *p = container_of(kref, struct parallel_io, refcnt);

	dprintk("%s enter\n", __func__);
	p->pnfs_callback(p->data);
	kfree(p);
}

static inline void put_parallel(struct parallel_io *p)
{
	kref_put(&p->refcnt, destroy_parallel);
}

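/*
 * Submit the current bio, if any, and return NULL so callers can
 * write "bio = bl_submit_bio(bio);" to flush and reset their bio
 * cursor in one step.  The in-flight bio holds its own parallel_io
 * reference.
 */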
static struct bio *
bl_submit_bio(struct bio *bio)
{
	if (bio) {
		get_parallel(bio->bi_private);
		dprintk("%s submitting %s bio %u@%llu\n", __func__,
			bio_op(bio) == READ ? "read" : "write",
			bio->bi_iter.bi_size,
			(unsigned long long)bio->bi_iter.bi_sector);
		submit_bio(bio);
	}
	return NULL;
}

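/*
 * Allocate a bio for up to npg pages.  When allocating under memory
 * pressure (PF_MEMALLOC), fall back to progressively smaller bios
 * instead of failing outright; do_add_page_to_bio() simply issues
 * additional bios for the pages that no longer fit.
 */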
static struct bio *
bl_alloc_init_bio(int npg, struct block_device *bdev, sector_t disk_sector,
		bio_end_io_t end_io, struct parallel_io *par)
{
	struct bio *bio;

	npg = min(npg, BIO_MAX_PAGES);
	bio = bio_alloc(GFP_NOIO, npg);
	if (!bio && (current->flags & PF_MEMALLOC)) {
		while (!bio && (npg /= 2))
			bio = bio_alloc(GFP_NOIO, npg);
	}

	if (bio) {
		bio->bi_iter.bi_sector = disk_sector;
		bio_set_dev(bio, bdev);
		bio->bi_end_io = end_io;
		bio->bi_private = par;
	}
	return bio;
}

static bool offset_in_map(u64 offset, struct pnfs_block_dev_map *map)
{
	return offset >= map->start && offset < map->start + map->len;
}

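/*
 * The address translation below runs in two steps: the file-relative
 * sector (isect) is first mapped into the logical volume
 * (+ be_v_offset - be_f_offset), and the resulting volume byte offset
 * is then mapped onto a physical device range via dev->map().  Purely
 * as an illustration: with be_f_offset = 0, be_v_offset = 1024 and
 * isect = 8, the volume offset is (8 + 1024) << SECTOR_SHIFT = 528384
 * bytes, which is then adjusted by the map's disk_offset/start delta.
 */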
static struct bio *
do_add_page_to_bio(struct bio *bio, int npg, int rw, sector_t isect,
		struct page *page, struct pnfs_block_dev_map *map,
		struct pnfs_block_extent *be, bio_end_io_t end_io,
		struct parallel_io *par, unsigned int offset, int *len)
{
	struct pnfs_block_dev *dev =
		container_of(be->be_device, struct pnfs_block_dev, node);
	u64 disk_addr, end;

	dprintk("%s: npg %d rw %d isect %llu offset %u len %d\n", __func__,
		npg, rw, (unsigned long long)isect, offset, *len);

	/* translate to device offset */
	isect += be->be_v_offset;
	isect -= be->be_f_offset;

	/* translate to physical disk offset */
	disk_addr = (u64)isect << SECTOR_SHIFT;
	if (!offset_in_map(disk_addr, map)) {
		if (!dev->map(dev, disk_addr, map) || !offset_in_map(disk_addr, map))
			return ERR_PTR(-EIO);
		bio = bl_submit_bio(bio);
	}
	disk_addr += map->disk_offset;
	disk_addr -= map->start;

	/* limit length to what the device mapping allows */
	end = disk_addr + *len;
	if (end >= map->start + map->len)
		*len = map->start + map->len - disk_addr;

retry:
	if (!bio) {
		bio = bl_alloc_init_bio(npg, map->bdev,
				disk_addr >> SECTOR_SHIFT, end_io, par);
		if (!bio)
			return ERR_PTR(-ENOMEM);
		bio_set_op_attrs(bio, rw, 0);
	}
	if (bio_add_page(bio, page, *len, offset) < *len) {
		bio = bl_submit_bio(bio);
		goto retry;
	}
	return bio;
}

static void bl_mark_devices_unavailable(struct nfs_pgio_header *header, bool rw)
{
	struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg);
	size_t bytes_left = header->args.count;
	sector_t isect, extent_length = 0;
	struct pnfs_block_extent be;

	isect = header->args.offset >> SECTOR_SHIFT;
	bytes_left += header->args.offset - (isect << SECTOR_SHIFT);

	while (bytes_left > 0) {
		if (!ext_tree_lookup(bl, isect, &be, rw))
			return;
		extent_length = be.be_length - (isect - be.be_f_offset);
		nfs4_mark_deviceid_unavailable(be.be_device);
		isect += extent_length;
		if (bytes_left > extent_length << SECTOR_SHIFT)
			bytes_left -= extent_length << SECTOR_SHIFT;
		else
			bytes_left = 0;
	}
}

static void bl_end_io_read(struct bio *bio)
{
	struct parallel_io *par = bio->bi_private;

	if (bio->bi_status) {
		struct nfs_pgio_header *header = par->data;

		if (!header->pnfs_error)
			header->pnfs_error = -EIO;
		pnfs_set_lo_fail(header->lseg);
		bl_mark_devices_unavailable(header, false);
	}

	bio_put(bio);
	put_parallel(par);
}

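/*
 * Bio completion runs in interrupt context, so the final pNFS read
 * completion is bounced to a workqueue; the rpc_task embedded in the
 * pageio header conveniently supplies the work_struct (u.tk_work).
 */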
static void bl_read_cleanup(struct work_struct *work)
{
	struct rpc_task *task;
	struct nfs_pgio_header *hdr;

	dprintk("%s enter\n", __func__);
	task = container_of(work, struct rpc_task, u.tk_work);
	hdr = container_of(task, struct nfs_pgio_header, task);
	pnfs_ld_read_done(hdr);
}

static void
bl_end_par_io_read(void *data)
{
	struct nfs_pgio_header *hdr = data;

	hdr->task.tk_status = hdr->pnfs_error;
	INIT_WORK(&hdr->task.u.tk_work, bl_read_cleanup);
	schedule_work(&hdr->task.u.tk_work);
}

static enum pnfs_try_status
bl_read_pagelist(struct nfs_pgio_header *header)
{
	struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg);
	struct pnfs_block_dev_map map = { .start = NFS4_MAX_UINT64 };
	struct bio *bio = NULL;
	struct pnfs_block_extent be;
	sector_t isect, extent_length = 0;
	struct parallel_io *par;
	loff_t f_offset = header->args.offset;
	size_t bytes_left = header->args.count;
	unsigned int pg_offset = header->args.pgbase, pg_len;
	struct page **pages = header->args.pages;
	int pg_index = header->args.pgbase >> PAGE_SHIFT;
	const bool is_dio = (header->dreq != NULL);
	struct blk_plug plug;
	int i;

	dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__,
		header->page_array.npages, f_offset,
		(unsigned int)header->args.count);

	par = alloc_parallel(header);
	if (!par)
		return PNFS_NOT_ATTEMPTED;
	par->pnfs_callback = bl_end_par_io_read;

	blk_start_plug(&plug);

	isect = (sector_t) (f_offset >> SECTOR_SHIFT);
	/* Code assumes extents are page-aligned */
	for (i = pg_index; i < header->page_array.npages; i++) {
		if (extent_length <= 0) {
			/* We've used up the previous extent */
			bio = bl_submit_bio(bio);

			/* Get the next one */
			if (!ext_tree_lookup(bl, isect, &be, false)) {
				header->pnfs_error = -EIO;
				goto out;
			}
			extent_length = be.be_length - (isect - be.be_f_offset);
		}

		if (is_dio) {
			if (pg_offset + bytes_left > PAGE_SIZE)
				pg_len = PAGE_SIZE - pg_offset;
			else
				pg_len = bytes_left;
		} else {
			BUG_ON(pg_offset != 0);
			pg_len = PAGE_SIZE;
		}

		if (is_hole(&be)) {
			bio = bl_submit_bio(bio);
			/* Fill hole w/ zeroes w/o accessing device */
			dprintk("%s Zeroing page for hole\n", __func__);
			zero_user_segment(pages[i], pg_offset, pg_len);

			/* invalidate map */
			map.start = NFS4_MAX_UINT64;
		} else {
			bio = do_add_page_to_bio(bio,
						 header->page_array.npages - i,
						 READ,
						 isect, pages[i], &map, &be,
						 bl_end_io_read, par,
						 pg_offset, &pg_len);
			if (IS_ERR(bio)) {
				header->pnfs_error = PTR_ERR(bio);
				bio = NULL;
				goto out;
			}
		}
		isect += (pg_len >> SECTOR_SHIFT);
		extent_length -= (pg_len >> SECTOR_SHIFT);
		f_offset += pg_len;
		bytes_left -= pg_len;
		pg_offset = 0;
	}
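	/* Report EOF and trim res.count if the read ran past i_size. */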
	if ((isect << SECTOR_SHIFT) >= header->inode->i_size) {
		header->res.eof = 1;
		header->res.count = header->inode->i_size - header->args.offset;
	} else {
		header->res.count = (isect << SECTOR_SHIFT) - header->args.offset;
	}
out:
	bl_submit_bio(bio);
	blk_finish_plug(&plug);
	put_parallel(par);
	return PNFS_ATTEMPTED;
}

static void bl_end_io_write(struct bio *bio)
{
	struct parallel_io *par = bio->bi_private;
	struct nfs_pgio_header *header = par->data;

	if (bio->bi_status) {
		if (!header->pnfs_error)
			header->pnfs_error = -EIO;
		pnfs_set_lo_fail(header->lseg);
		bl_mark_devices_unavailable(header, true);
	}
	bio_put(bio);
	put_parallel(par);
}

/* Scheduled from bl_end_par_io_write(); marks the sectors as written
 * and extends the commit list.
 */
static void bl_write_cleanup(struct work_struct *work)
{
	struct rpc_task *task = container_of(work, struct rpc_task, u.tk_work);
	struct nfs_pgio_header *hdr =
			container_of(task, struct nfs_pgio_header, task);

	dprintk("%s enter\n", __func__);

	if (likely(!hdr->pnfs_error)) {
		struct pnfs_block_layout *bl = BLK_LSEG2EXT(hdr->lseg);
		u64 start = hdr->args.offset & (loff_t)PAGE_MASK;
		u64 end = (hdr->args.offset + hdr->args.count +
			PAGE_SIZE - 1) & (loff_t)PAGE_MASK;
		u64 lwb = hdr->args.offset + hdr->args.count;

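		/*
		 * start/end are rounded out to whole pages, matching the
		 * whole-page writes issued by bl_write_pagelist; lwb
		 * preserves the exact last-written byte for layoutcommit.
		 */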
		ext_tree_mark_written(bl, start >> SECTOR_SHIFT,
					(end - start) >> SECTOR_SHIFT, lwb);
	}

	pnfs_ld_write_done(hdr);
}

/* Called when the last bio associated with a bl_write_pagelist call
 * finishes.
 */
static void bl_end_par_io_write(void *data)
{
	struct nfs_pgio_header *hdr = data;

	hdr->task.tk_status = hdr->pnfs_error;
	hdr->verf.committed = NFS_FILE_SYNC;
	INIT_WORK(&hdr->task.u.tk_work, bl_write_cleanup);
	schedule_work(&hdr->task.u.tk_work);
}

static enum pnfs_try_status
bl_write_pagelist(struct nfs_pgio_header *header, int sync)
{
	struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg);
	struct pnfs_block_dev_map map = { .start = NFS4_MAX_UINT64 };
	struct bio *bio = NULL;
	struct pnfs_block_extent be;
	sector_t isect, extent_length = 0;
	struct parallel_io *par = NULL;
	loff_t offset = header->args.offset;
	size_t count = header->args.count;
	struct page **pages = header->args.pages;
	int pg_index = header->args.pgbase >> PAGE_SHIFT;
	unsigned int pg_len;
	struct blk_plug plug;
	int i;

	dprintk("%s enter, %zu@%lld\n", __func__, count, offset);

	/* At this point, header->page_array is a (sequential) list of nfs_pages.
	 * We want to write each, and if there is an error set pnfs_error
	 * so that the write is retried through normal NFS via the MDS.
	 */
	par = alloc_parallel(header);
	if (!par)
		return PNFS_NOT_ATTEMPTED;
	par->pnfs_callback = bl_end_par_io_write;

	blk_start_plug(&plug);

	/* we always write out the whole page */
	offset = offset & (loff_t)PAGE_MASK;
	isect = offset >> SECTOR_SHIFT;

	for (i = pg_index; i < header->page_array.npages; i++) {
		if (extent_length <= 0) {
			/* We've used up the previous extent */
			bio = bl_submit_bio(bio);
			/* Get the next one */
			if (!ext_tree_lookup(bl, isect, &be, true)) {
				header->pnfs_error = -EINVAL;
				goto out;
			}

			extent_length = be.be_length - (isect - be.be_f_offset);
		}

		pg_len = PAGE_SIZE;
		bio = do_add_page_to_bio(bio, header->page_array.npages - i,
					 WRITE, isect, pages[i], &map, &be,
					 bl_end_io_write, par,
					 0, &pg_len);
		if (IS_ERR(bio)) {
			header->pnfs_error = PTR_ERR(bio);
			bio = NULL;
			goto out;
		}

		offset += pg_len;
		count -= pg_len;
		isect += (pg_len >> SECTOR_SHIFT);
		extent_length -= (pg_len >> SECTOR_SHIFT);
	}

	header->res.count = header->args.count;
out:
	bl_submit_bio(bio);
	blk_finish_plug(&plug);
	put_parallel(par);
	return PNFS_ATTEMPTED;
}

static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
	int err;

	dprintk("%s enter\n", __func__);

	err = ext_tree_remove(bl, true, 0, LLONG_MAX);
	WARN_ON(err);

	kfree_rcu(bl, bl_layout.plh_rcu);
}

static struct pnfs_layout_hdr *__bl_alloc_layout_hdr(struct inode *inode,
		gfp_t gfp_flags, bool is_scsi_layout)
{
	struct pnfs_block_layout *bl;

	dprintk("%s enter\n", __func__);
	bl = kzalloc(sizeof(*bl), gfp_flags);
	if (!bl)
		return NULL;

	bl->bl_ext_rw = RB_ROOT;
	bl->bl_ext_ro = RB_ROOT;
	spin_lock_init(&bl->bl_ext_lock);

	bl->bl_scsi_layout = is_scsi_layout;
	return &bl->bl_layout;
}

static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode,
						   gfp_t gfp_flags)
{
	return __bl_alloc_layout_hdr(inode, gfp_flags, false);
}

static struct pnfs_layout_hdr *sl_alloc_layout_hdr(struct inode *inode,
						   gfp_t gfp_flags)
{
	return __bl_alloc_layout_hdr(inode, gfp_flags, true);
}

static void bl_free_lseg(struct pnfs_layout_segment *lseg)
{
	dprintk("%s enter\n", __func__);
	kfree(lseg);
}

/* Tracks info needed to ensure extents in layout obey constraints of spec */
struct layout_verification {
	u32 mode;	/* R or RW */
	u64 start;	/* Expected start of next non-COW extent */
	u64 inval;	/* Start of INVAL coverage */
	u64 cowread;	/* End of COW read coverage */
};
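
/*
 * Illustration of the checks below (a sketch, not the normative rules
 * in the spec): in an IOMODE_RW layout, READWRITE/INVALID extents must
 * advance lv->start contiguously, while READ_DATA extents may only
 * shadow previously seen INVALID ranges for copy-on-write; lv->inval
 * and lv->cowread track how much of that range the READ extents have
 * covered so far.
 */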

/* Verify the extent meets the layout requirements of RFC 5663
 * (pNFS block/volume layout), section 2.3.1.
 */
static int verify_extent(struct pnfs_block_extent *be,
			 struct layout_verification *lv)
{
	if (lv->mode == IOMODE_READ) {
		if (be->be_state == PNFS_BLOCK_READWRITE_DATA ||
		    be->be_state == PNFS_BLOCK_INVALID_DATA)
			return -EIO;
		if (be->be_f_offset != lv->start)
			return -EIO;
		lv->start += be->be_length;
		return 0;
	}
	/* lv->mode == IOMODE_RW */
	if (be->be_state == PNFS_BLOCK_READWRITE_DATA) {
		if (be->be_f_offset != lv->start)
			return -EIO;
		if (lv->cowread > lv->start)
			return -EIO;
		lv->start += be->be_length;
		lv->inval = lv->start;
		return 0;
	} else if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
		if (be->be_f_offset != lv->start)
			return -EIO;
		lv->start += be->be_length;
		return 0;
	} else if (be->be_state == PNFS_BLOCK_READ_DATA) {
		if (be->be_f_offset > lv->start)
			return -EIO;
		if (be->be_f_offset < lv->inval)
			return -EIO;
		if (be->be_f_offset < lv->cowread)
			return -EIO;
		/* It looks like you might want to min this with lv->start,
		 * but you really don't.
		 */
		lv->inval = lv->inval + be->be_length;
		lv->cowread = be->be_f_offset + be->be_length;
		return 0;
	} else
		return -EIO;
}

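/*
 * Sector numbers arrive on the wire as byte offsets; they must be
 * 512-byte aligned (0x1ff == SECTOR_SIZE - 1) and are stored in
 * 512-byte sector units.
 */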
static int decode_sector_number(__be32 **rp, sector_t *sp)
{
	uint64_t s;

	*rp = xdr_decode_hyper(*rp, &s);
	if (s & 0x1ff) {
		printk(KERN_WARNING "NFS: %s: sector not aligned\n", __func__);
		return -1;
	}
	*sp = s >> SECTOR_SHIFT;
	return 0;
}

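/*
 * Look up (or fetch) a deviceid node.  If a cached node has been
 * marked unavailable for longer than PNFS_DEVICE_RETRY_TIMEOUT, drop
 * it and retry a fresh GETDEVICEINFO; otherwise fail with -ENODEV.
 */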
static struct nfs4_deviceid_node *
bl_find_get_deviceid(struct nfs_server *server,
		const struct nfs4_deviceid *id, const struct cred *cred,
		gfp_t gfp_mask)
{
	struct nfs4_deviceid_node *node;
	unsigned long start, end;

retry:
	node = nfs4_find_get_deviceid(server, id, cred, gfp_mask);
	if (!node)
		return ERR_PTR(-ENODEV);

	if (test_bit(NFS_DEVICEID_UNAVAILABLE, &node->flags) == 0)
		return node;

	end = jiffies;
	start = end - PNFS_DEVICE_RETRY_TIMEOUT;
	if (!time_in_range(node->timestamp_unavailable, start, end)) {
		nfs4_delete_deviceid(node->ld, node->nfs_client, id);
		goto retry;
	}

	nfs4_put_deviceid_node(node);
	return ERR_PTR(-ENODEV);
}

static int
bl_alloc_extent(struct xdr_stream *xdr, struct pnfs_layout_hdr *lo,
		struct layout_verification *lv, struct list_head *extents,
		gfp_t gfp_mask)
{
	struct pnfs_block_extent *be;
	struct nfs4_deviceid id;
	int error;
	__be32 *p;

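	/*
	 * On-the-wire extent (RFC 5663 pnfs_block_extent4): a deviceid
	 * followed by three 64-bit sector numbers (file offset, length,
	 * storage offset) and a 32-bit state, hence 28 bytes past the
	 * deviceid.
	 */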
	p = xdr_inline_decode(xdr, 28 + NFS4_DEVICEID4_SIZE);
	if (!p)
		return -EIO;

	be = kzalloc(sizeof(*be), GFP_NOFS);
	if (!be)
		return -ENOMEM;

	memcpy(&id, p, NFS4_DEVICEID4_SIZE);
	p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE);

	be->be_device = bl_find_get_deviceid(NFS_SERVER(lo->plh_inode), &id,
						lo->plh_lc_cred, gfp_mask);
	if (IS_ERR(be->be_device)) {
		error = PTR_ERR(be->be_device);
		goto out_free_be;
	}

	/*
	 * The next three values are read in as bytes, but stored in the
	 * extent structure in 512-byte granularity.
	 */
	error = -EIO;
	if (decode_sector_number(&p, &be->be_f_offset) < 0)
		goto out_put_deviceid;
	if (decode_sector_number(&p, &be->be_length) < 0)
		goto out_put_deviceid;
	if (decode_sector_number(&p, &be->be_v_offset) < 0)
		goto out_put_deviceid;
	be->be_state = be32_to_cpup(p++);

	error = verify_extent(be, lv);
	if (error) {
		dprintk("%s: extent verification failed\n", __func__);
		goto out_put_deviceid;
	}

	list_add_tail(&be->be_list, extents);
	return 0;

out_put_deviceid:
	nfs4_put_deviceid_node(be->be_device);
out_free_be:
	kfree(be);
	return error;
}

static struct pnfs_layout_segment *
bl_alloc_lseg(struct pnfs_layout_hdr *lo, struct nfs4_layoutget_res *lgr,
		gfp_t gfp_mask)
{
	struct layout_verification lv = {
		.mode = lgr->range.iomode,
		.start = lgr->range.offset >> SECTOR_SHIFT,
		.inval = lgr->range.offset >> SECTOR_SHIFT,
		.cowread = lgr->range.offset >> SECTOR_SHIFT,
	};
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
	struct pnfs_layout_segment *lseg;
	struct xdr_buf buf;
	struct xdr_stream xdr;
	struct page *scratch;
	int status, i;
	uint32_t count;
	__be32 *p;
	LIST_HEAD(extents);

	dprintk("---> %s\n", __func__);

	lseg = kzalloc(sizeof(*lseg), gfp_mask);
	if (!lseg)
		return ERR_PTR(-ENOMEM);

	status = -ENOMEM;
	scratch = alloc_page(gfp_mask);
	if (!scratch)
		goto out;

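	/*
	 * The scratch page lets xdr_inline_decode() linearize items that
	 * straddle page boundaries in the layout body.
	 */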
	xdr_init_decode_pages(&xdr, &buf,
			lgr->layoutp->pages, lgr->layoutp->len);
	xdr_set_scratch_buffer(&xdr, page_address(scratch), PAGE_SIZE);

	status = -EIO;
	p = xdr_inline_decode(&xdr, 4);
	if (unlikely(!p))
		goto out_free_scratch;

	count = be32_to_cpup(p++);
	dprintk("%s: number of extents %d\n", __func__, count);

	/*
	 * Decode individual extents, putting them in temporary staging area
	 * until whole layout is decoded to make error recovery easier.
	 */
	for (i = 0; i < count; i++) {
		status = bl_alloc_extent(&xdr, lo, &lv, &extents, gfp_mask);
		if (status)
			goto process_extents;
	}

	if (lgr->range.offset + lgr->range.length !=
			lv.start << SECTOR_SHIFT) {
		dprintk("%s Final length mismatch\n", __func__);
		status = -EIO;
		goto process_extents;
	}

	if (lv.start < lv.cowread) {
		dprintk("%s Final uncovered COW extent\n", __func__);
		status = -EIO;
	}

process_extents:
	while (!list_empty(&extents)) {
		struct pnfs_block_extent *be =
			list_first_entry(&extents, struct pnfs_block_extent,
					 be_list);
		list_del(&be->be_list);

		if (!status)
			status = ext_tree_insert(bl, be);

		if (status) {
			nfs4_put_deviceid_node(be->be_device);
			kfree(be);
		}
	}

out_free_scratch:
	__free_page(scratch);
out:
	dprintk("%s returns %d\n", __func__, status);
	switch (status) {
	case -ENODEV:
		/* Our extent block devices are unavailable */
		set_bit(NFS_LSEG_UNAVAILABLE, &lseg->pls_flags);
		fallthrough;
	case 0:
		return lseg;
	default:
		kfree(lseg);
		return ERR_PTR(status);
	}
}

static void
bl_return_range(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_range *range)
{
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
	sector_t offset = range->offset >> SECTOR_SHIFT, end;

	if (range->offset % 8) {
		dprintk("%s: offset %lld not block size aligned\n",
			__func__, range->offset);
		return;
	}

	if (range->length != NFS4_MAX_UINT64) {
		if (range->length % 8) {
			dprintk("%s: length %lld not block size aligned\n",
				__func__, range->length);
			return;
		}

		end = offset + (range->length >> SECTOR_SHIFT);
	} else {
		end = round_down(NFS4_MAX_UINT64, PAGE_SIZE);
	}

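	/* The IOMODE_RW bit selects the read-write extent tree. */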
	ext_tree_remove(bl, range->iomode & IOMODE_RW, offset, end);
}

static int
bl_prepare_layoutcommit(struct nfs4_layoutcommit_args *arg)
{
	return ext_tree_prepare_commit(arg);
}

static void
bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata)
{
	ext_tree_mark_committed(&lcdata->args, lcdata->res.status);
}

static int
bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh)
{
	dprintk("%s enter\n", __func__);

	if (server->pnfs_blksize == 0) {
		dprintk("%s Server did not return blksize\n", __func__);
		return -EINVAL;
	}
	if (server->pnfs_blksize > PAGE_SIZE) {
		printk(KERN_ERR "%s: pNFS blksize %d not supported.\n",
			__func__, server->pnfs_blksize);
		return -EINVAL;
	}

	return 0;
}

static bool
is_aligned_req(struct nfs_pageio_descriptor *pgio,
		struct nfs_page *req, unsigned int alignment, bool is_write)
{
	/*
	 * Always accept buffered writes; higher layers take care of the
	 * right alignment.
	 */
	if (pgio->pg_dreq == NULL)
		return true;

	if (!IS_ALIGNED(req->wb_offset, alignment))
		return false;

	if (IS_ALIGNED(req->wb_bytes, alignment))
		return true;

	if (is_write &&
	    (req_offset(req) + req->wb_bytes == i_size_read(pgio->pg_inode))) {
		/*
		 * If the write goes up to the inode size, just write
		 * the full page.  Data past the inode size is
		 * guaranteed to be zeroed by the higher level client
		 * code, and this behaviour is mandated by RFC 5663
		 * section 2.3.2.
		 */
		return true;
	}

	return false;
}

static void
bl_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	if (!is_aligned_req(pgio, req, SECTOR_SIZE, false)) {
		nfs_pageio_reset_read_mds(pgio);
		return;
	}

	pnfs_generic_pg_init_read(pgio, req);

	if (pgio->pg_lseg &&
		test_bit(NFS_LSEG_UNAVAILABLE, &pgio->pg_lseg->pls_flags)) {
		pnfs_error_mark_layout_for_return(pgio->pg_inode, pgio->pg_lseg);
		pnfs_set_lo_fail(pgio->pg_lseg);
		nfs_pageio_reset_read_mds(pgio);
	}
}

/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
static size_t
bl_pg_test_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		struct nfs_page *req)
{
	if (!is_aligned_req(pgio, req, SECTOR_SIZE, false))
		return 0;
	return pnfs_generic_pg_test(pgio, prev, req);
}

/*
 * Return the number of contiguous bytes for a given inode
 * starting at page frame idx.
 */
static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t end;

	/* Optimize common case that writes from 0 to end of file */
	end = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
	if (end != inode->i_mapping->nrpages) {
		rcu_read_lock();
		end = page_cache_next_miss(mapping, idx + 1, ULONG_MAX);
		rcu_read_unlock();
	}

	if (!end)
		return i_size_read(inode) - (idx << PAGE_SHIFT);
	else
		return (end - idx) << PAGE_SHIFT;
}

static void
bl_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	u64 wb_size;

	if (!is_aligned_req(pgio, req, PAGE_SIZE, true)) {
		nfs_pageio_reset_write_mds(pgio);
		return;
	}

	if (pgio->pg_dreq == NULL)
		wb_size = pnfs_num_cont_bytes(pgio->pg_inode,
					      req->wb_index);
	else
		wb_size = nfs_dreq_bytes_left(pgio->pg_dreq);

	pnfs_generic_pg_init_write(pgio, req, wb_size);

	if (pgio->pg_lseg &&
		test_bit(NFS_LSEG_UNAVAILABLE, &pgio->pg_lseg->pls_flags)) {
		pnfs_error_mark_layout_for_return(pgio->pg_inode, pgio->pg_lseg);
		pnfs_set_lo_fail(pgio->pg_lseg);
		nfs_pageio_reset_write_mds(pgio);
	}
}

/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
static size_t
bl_pg_test_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		 struct nfs_page *req)
{
	if (!is_aligned_req(pgio, req, PAGE_SIZE, true))
		return 0;
	return pnfs_generic_pg_test(pgio, prev, req);
}

static const struct nfs_pageio_ops bl_pg_read_ops = {
	.pg_init = bl_pg_init_read,
	.pg_test = bl_pg_test_read,
	.pg_doio = pnfs_generic_pg_readpages,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

static const struct nfs_pageio_ops bl_pg_write_ops = {
	.pg_init = bl_pg_init_write,
	.pg_test = bl_pg_test_write,
	.pg_doio = pnfs_generic_pg_writepages,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

static struct pnfs_layoutdriver_type blocklayout_type = {
	.id				= LAYOUT_BLOCK_VOLUME,
	.name				= "LAYOUT_BLOCK_VOLUME",
	.owner				= THIS_MODULE,
	.flags				= PNFS_LAYOUTRET_ON_SETATTR |
					  PNFS_LAYOUTRET_ON_ERROR |
					  PNFS_READ_WHOLE_PAGE,
	.read_pagelist			= bl_read_pagelist,
	.write_pagelist			= bl_write_pagelist,
	.alloc_layout_hdr		= bl_alloc_layout_hdr,
	.free_layout_hdr		= bl_free_layout_hdr,
	.alloc_lseg			= bl_alloc_lseg,
	.free_lseg			= bl_free_lseg,
	.return_range			= bl_return_range,
	.prepare_layoutcommit		= bl_prepare_layoutcommit,
	.cleanup_layoutcommit		= bl_cleanup_layoutcommit,
	.set_layoutdriver		= bl_set_layoutdriver,
	.alloc_deviceid_node		= bl_alloc_deviceid_node,
	.free_deviceid_node		= bl_free_deviceid_node,
	.pg_read_ops			= &bl_pg_read_ops,
	.pg_write_ops			= &bl_pg_write_ops,
	.sync				= pnfs_generic_sync,
};

static struct pnfs_layoutdriver_type scsilayout_type = {
	.id				= LAYOUT_SCSI,
	.name				= "LAYOUT_SCSI",
	.owner				= THIS_MODULE,
	.flags				= PNFS_LAYOUTRET_ON_SETATTR |
					  PNFS_LAYOUTRET_ON_ERROR |
					  PNFS_READ_WHOLE_PAGE,
	.read_pagelist			= bl_read_pagelist,
	.write_pagelist			= bl_write_pagelist,
	.alloc_layout_hdr		= sl_alloc_layout_hdr,
	.free_layout_hdr		= bl_free_layout_hdr,
	.alloc_lseg			= bl_alloc_lseg,
	.free_lseg			= bl_free_lseg,
	.return_range			= bl_return_range,
	.prepare_layoutcommit		= bl_prepare_layoutcommit,
	.cleanup_layoutcommit		= bl_cleanup_layoutcommit,
	.set_layoutdriver		= bl_set_layoutdriver,
	.alloc_deviceid_node		= bl_alloc_deviceid_node,
	.free_deviceid_node		= bl_free_deviceid_node,
	.pg_read_ops			= &bl_pg_read_ops,
	.pg_write_ops			= &bl_pg_write_ops,
	.sync				= pnfs_generic_sync,
};

static int __init nfs4blocklayout_init(void)
{
	int ret;

	dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__);

	ret = bl_init_pipefs();
	if (ret)
		goto out;

	ret = pnfs_register_layoutdriver(&blocklayout_type);
	if (ret)
		goto out_cleanup_pipe;

	ret = pnfs_register_layoutdriver(&scsilayout_type);
	if (ret)
		goto out_unregister_block;
	return 0;

out_unregister_block:
	pnfs_unregister_layoutdriver(&blocklayout_type);
out_cleanup_pipe:
	bl_cleanup_pipefs();
out:
	return ret;
}

static void __exit nfs4blocklayout_exit(void)
{
	dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n",
	       __func__);

	pnfs_unregister_layoutdriver(&scsilayout_type);
	pnfs_unregister_layoutdriver(&blocklayout_type);
	bl_cleanup_pipefs();
}

MODULE_ALIAS("nfs-layouttype4-3");
MODULE_ALIAS("nfs-layouttype4-5");

module_init(nfs4blocklayout_init);
module_exit(nfs4blocklayout_exit);