// SPDX-License-Identifier: GPL-2.0-only
/*
 * Module for pnfs flexfile layout driver.
 *
 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
 *
 * Tao Peng <bergwolf@primarydata.com>
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
#include <linux/sched/mm.h>

#include <linux/sunrpc/metrics.h>

#include "flexfilelayout.h"
#include "../nfs4session.h"
#include "../nfs4idmap.h"
#include "../internal.h"
#include "../delegation.h"
#include "../nfs4trace.h"
#include "../iostat.h"
#include "../nfs.h"
#include "../nfs42.h"

#define NFSDBG_FACILITY         NFSDBG_PNFS_LD

#define FF_LAYOUT_POLL_RETRY_MAX     (15*HZ)
#define FF_LAYOUTRETURN_MAXERR 20

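/*
 * Cap on retransmissions for soft/softerr mounts; applied to
 * pg_maxretrans in the pg_init paths below. Presumably exposed as a
 * module parameter later in this file.
 */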
static unsigned short io_maxretrans;

static const struct pnfs_commit_ops ff_layout_commit_ops;
static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
		struct nfs_pgio_header *hdr);
static int ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
			       struct nfs42_layoutstat_devinfo *devinfo,
			       int dev_limit);
static void ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
			      const struct nfs42_layoutstat_devinfo *devinfo,
			      struct nfs4_ff_layout_mirror *mirror);

static struct pnfs_layout_hdr *
ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
{
	struct nfs4_flexfile_layout *ffl;

	ffl = kzalloc(sizeof(*ffl), gfp_flags);
	if (ffl) {
		pnfs_init_ds_commit_info(&ffl->commit_info);
		INIT_LIST_HEAD(&ffl->error_list);
		INIT_LIST_HEAD(&ffl->mirrors);
		ffl->last_report_time = ktime_get();
		ffl->commit_info.ops = &ff_layout_commit_ops;
		return &ffl->generic_hdr;
	} else
		return NULL;
}

static void
ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(lo);
	struct nfs4_ff_layout_ds_err *err, *n;

	list_for_each_entry_safe(err, n, &ffl->error_list, list) {
		list_del(&err->list);
		kfree(err);
	}
	kfree_rcu(ffl, generic_hdr.plh_rcu);
}

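/*
 * XDR decode helpers for the flexfile layout body. Each consumes one
 * field via xdr_inline_decode() and returns -ENOBUFS if the stream is
 * shorter than the field it expects.
 */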
static int decode_pnfs_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
	if (unlikely(p == NULL))
		return -ENOBUFS;
	stateid->type = NFS4_PNFS_DS_STATEID_TYPE;
	memcpy(stateid->data, p, NFS4_STATEID_SIZE);
	dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
		p[0], p[1], p[2], p[3]);
	return 0;
}

static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
	if (unlikely(!p))
		return -ENOBUFS;
	memcpy(devid, p, NFS4_DEVICEID4_SIZE);
	nfs4_print_deviceid(devid);
	return 0;
}

static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		return -ENOBUFS;
	fh->size = be32_to_cpup(p++);
	if (fh->size > NFS_MAXFHSIZE) {
		printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
		       fh->size);
		return -EOVERFLOW;
	}
	/* fh.data */
	p = xdr_inline_decode(xdr, fh->size);
	if (unlikely(!p))
		return -ENOBUFS;
	memcpy(&fh->data, p, fh->size);
	dprintk("%s: fh len %d\n", __func__, fh->size);

	return 0;
}

/*
 * Currently only stringified uids and gids are accepted.
 * I.e., kerberos is not supported to the DSes, so no principals.
 *
 * That means that one common function will suffice, but when
 * principals are added, this should be split to accommodate
 * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
 */
static int
decode_name(struct xdr_stream *xdr, u32 *id)
{
	__be32 *p;
	int len;

	/* opaque_length(4)*/
	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		return -ENOBUFS;
	len = be32_to_cpup(p++);
	if (len < 0)
		return -EINVAL;

	dprintk("%s: len %u\n", __func__, len);

	/* opaque body */
	p = xdr_inline_decode(xdr, len);
	if (unlikely(!p))
		return -ENOBUFS;

	if (!nfs_map_string_to_numeric((char *)p, len, id))
		return -EINVAL;

	return 0;
}

static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
		const struct nfs4_ff_layout_mirror *m2)
{
	int i, j;

	if (m1->fh_versions_cnt != m2->fh_versions_cnt)
		return false;
	for (i = 0; i < m1->fh_versions_cnt; i++) {
		bool found_fh = false;
		for (j = 0; j < m2->fh_versions_cnt; j++) {
			if (nfs_compare_fh(&m1->fh_versions[i],
					&m2->fh_versions[j]) == 0) {
				found_fh = true;
				break;
			}
		}
		if (!found_fh)
			return false;
	}
	return true;
}

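/*
 * Mirrors are shared per layout: if a mirror with the same device ID
 * and file handle set is already on the layout's list, take a new
 * reference on it and return it instead of inserting the caller's copy.
 * refcount_inc_not_zero() guards against racing with the final put.
 */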
static struct nfs4_ff_layout_mirror *
ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
		struct nfs4_ff_layout_mirror *mirror)
{
	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
	struct nfs4_ff_layout_mirror *pos;
	struct inode *inode = lo->plh_inode;

	spin_lock(&inode->i_lock);
	list_for_each_entry(pos, &ff_layout->mirrors, mirrors) {
		if (memcmp(&mirror->devid, &pos->devid, sizeof(pos->devid)) != 0)
			continue;
		if (!ff_mirror_match_fh(mirror, pos))
			continue;
		if (refcount_inc_not_zero(&pos->ref)) {
			spin_unlock(&inode->i_lock);
			return pos;
		}
	}
	list_add(&mirror->mirrors, &ff_layout->mirrors);
	mirror->layout = lo;
	spin_unlock(&inode->i_lock);
	return mirror;
}

static void
ff_layout_remove_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	struct inode *inode;
	if (mirror->layout == NULL)
		return;
	inode = mirror->layout->plh_inode;
	spin_lock(&inode->i_lock);
	list_del(&mirror->mirrors);
	spin_unlock(&inode->i_lock);
	mirror->layout = NULL;
}

static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
{
	struct nfs4_ff_layout_mirror *mirror;

	mirror = kzalloc(sizeof(*mirror), gfp_flags);
	if (mirror != NULL) {
		spin_lock_init(&mirror->lock);
		refcount_set(&mirror->ref, 1);
		INIT_LIST_HEAD(&mirror->mirrors);
	}
	return mirror;
}

static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	const struct cred	*cred;

	ff_layout_remove_mirror(mirror);
	kfree(mirror->fh_versions);
	cred = rcu_access_pointer(mirror->ro_cred);
	put_cred(cred);
	cred = rcu_access_pointer(mirror->rw_cred);
	put_cred(cred);
	nfs4_ff_layout_put_deviceid(mirror->mirror_ds);
	kfree(mirror);
}

static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	if (mirror != NULL && refcount_dec_and_test(&mirror->ref))
		ff_layout_free_mirror(mirror);
}

static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
{
	u32 i;

	for (i = 0; i < fls->mirror_array_cnt; i++)
		ff_layout_put_mirror(fls->mirror_array[i]);
}

static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
{
	if (fls) {
		ff_layout_free_mirror_array(fls);
		kfree(fls);
	}
}

static bool
ff_lseg_match_mirrors(struct pnfs_layout_segment *l1,
		struct pnfs_layout_segment *l2)
{
	const struct nfs4_ff_layout_segment *fl1 = FF_LAYOUT_LSEG(l1);
	const struct nfs4_ff_layout_segment *fl2 = FF_LAYOUT_LSEG(l2);
	u32 i;

	if (fl1->mirror_array_cnt != fl2->mirror_array_cnt)
		return false;
	for (i = 0; i < fl1->mirror_array_cnt; i++) {
		if (fl1->mirror_array[i] != fl2->mirror_array[i])
			return false;
	}
	return true;
}

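/*
 * Ordering and merge predicates handed to
 * pnfs_generic_layout_insert_lseg(). ff_lseg_range_is_after() sorts RW
 * segments ahead of READ ones and otherwise orders by range;
 * ff_lseg_merge() decides whether a new segment can absorb an
 * overlapping old one with the same iomode and mirror set, extending
 * the new segment's end so that the old lseg can be freed.
 */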
static bool
ff_lseg_range_is_after(const struct pnfs_layout_range *l1,
		const struct pnfs_layout_range *l2)
{
	u64 end1, end2;

	if (l1->iomode != l2->iomode)
		return l1->iomode != IOMODE_READ;
	end1 = pnfs_calc_offset_end(l1->offset, l1->length);
	end2 = pnfs_calc_offset_end(l2->offset, l2->length);
	if (end1 < l2->offset)
		return false;
	if (end2 < l1->offset)
		return true;
	return l2->offset <= l1->offset;
}

static bool
ff_lseg_merge(struct pnfs_layout_segment *new,
		struct pnfs_layout_segment *old)
{
	u64 new_end, old_end;

	if (test_bit(NFS_LSEG_LAYOUTRETURN, &old->pls_flags))
		return false;
	if (new->pls_range.iomode != old->pls_range.iomode)
		return false;
	old_end = pnfs_calc_offset_end(old->pls_range.offset,
			old->pls_range.length);
	if (old_end < new->pls_range.offset)
		return false;
	new_end = pnfs_calc_offset_end(new->pls_range.offset,
			new->pls_range.length);
	if (new_end < old->pls_range.offset)
		return false;
	if (!ff_lseg_match_mirrors(new, old))
		return false;

	/* Mergeable: copy info from 'old' to 'new' */
	if (new_end < old_end)
		new_end = old_end;
	if (new->pls_range.offset < old->pls_range.offset)
		new->pls_range.offset = old->pls_range.offset;
	new->pls_range.length = pnfs_calc_offset_length(new->pls_range.offset,
			new_end);
	if (test_bit(NFS_LSEG_ROC, &old->pls_flags))
		set_bit(NFS_LSEG_ROC, &new->pls_flags);
	return true;
}

static void
ff_layout_add_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_segment *lseg,
		struct list_head *free_me)
{
	pnfs_generic_layout_insert_lseg(lo, lseg,
			ff_lseg_range_is_after,
			ff_lseg_merge,
			free_me);
}

static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
{
	int i, j;

	for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
		for (j = i + 1; j < fls->mirror_array_cnt; j++)
			if (fls->mirror_array[i]->efficiency <
			    fls->mirror_array[j]->efficiency)
				swap(fls->mirror_array[i],
				     fls->mirror_array[j]);
	}
}

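/*
 * Decode the ff_layout4 body returned by LAYOUTGET: a stripe unit (u64)
 * and mirror count (u32), then for each mirror a data-server count,
 * device ID, efficiency, stateid, an array of file handle versions and
 * stringified user/group names. Two optional trailing words carry the
 * layout flags and a layoutstats report interval.
 */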
static struct pnfs_layout_segment *
ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
		     struct nfs4_layoutget_res *lgr,
		     gfp_t gfp_flags)
{
	struct pnfs_layout_segment *ret;
	struct nfs4_ff_layout_segment *fls = NULL;
	struct xdr_stream stream;
	struct xdr_buf buf;
	struct page *scratch;
	u64 stripe_unit;
	u32 mirror_array_cnt;
	__be32 *p;
	int i, rc;

	dprintk("--> %s\n", __func__);
	scratch = alloc_page(gfp_flags);
	if (!scratch)
		return ERR_PTR(-ENOMEM);

	xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
			      lgr->layoutp->len);
	xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);

	/* stripe unit and mirror_array_cnt */
	rc = -EIO;
	p = xdr_inline_decode(&stream, 8 + 4);
	if (!p)
		goto out_err_free;

	p = xdr_decode_hyper(p, &stripe_unit);
	mirror_array_cnt = be32_to_cpup(p++);
	dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__,
		stripe_unit, mirror_array_cnt);

	if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT ||
	    mirror_array_cnt == 0)
		goto out_err_free;

	rc = -ENOMEM;
	fls = kzalloc(struct_size(fls, mirror_array, mirror_array_cnt),
			gfp_flags);
	if (!fls)
		goto out_err_free;

	fls->mirror_array_cnt = mirror_array_cnt;
	fls->stripe_unit = stripe_unit;

	for (i = 0; i < fls->mirror_array_cnt; i++) {
		struct nfs4_ff_layout_mirror *mirror;
		struct cred *kcred;
		const struct cred __rcu *cred;
		kuid_t uid;
		kgid_t gid;
		u32 ds_count, fh_count, id;
		int j;

		rc = -EIO;
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		ds_count = be32_to_cpup(p);

		/* FIXME: allow for striping? */
		if (ds_count != 1)
			goto out_err_free;

		fls->mirror_array[i] = ff_layout_alloc_mirror(gfp_flags);
		if (fls->mirror_array[i] == NULL) {
			rc = -ENOMEM;
			goto out_err_free;
		}

		fls->mirror_array[i]->ds_count = ds_count;

		/* deviceid */
		rc = decode_deviceid(&stream, &fls->mirror_array[i]->devid);
		if (rc)
			goto out_err_free;

		/* efficiency */
		rc = -EIO;
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		fls->mirror_array[i]->efficiency = be32_to_cpup(p);

		/* stateid */
		rc = decode_pnfs_stateid(&stream, &fls->mirror_array[i]->stateid);
		if (rc)
			goto out_err_free;

		/* fh */
		rc = -EIO;
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		fh_count = be32_to_cpup(p);

		fls->mirror_array[i]->fh_versions =
			kcalloc(fh_count, sizeof(struct nfs_fh),
				gfp_flags);
		if (fls->mirror_array[i]->fh_versions == NULL) {
			rc = -ENOMEM;
			goto out_err_free;
		}

		for (j = 0; j < fh_count; j++) {
			rc = decode_nfs_fh(&stream,
					   &fls->mirror_array[i]->fh_versions[j]);
			if (rc)
				goto out_err_free;
		}

		fls->mirror_array[i]->fh_versions_cnt = fh_count;

		/* user */
		rc = decode_name(&stream, &id);
		if (rc)
			goto out_err_free;

		uid = make_kuid(&init_user_ns, id);

		/* group */
		rc = decode_name(&stream, &id);
		if (rc)
			goto out_err_free;

		gid = make_kgid(&init_user_ns, id);

		if (gfp_flags & __GFP_FS)
			kcred = prepare_kernel_cred(NULL);
		else {
			unsigned int nofs_flags = memalloc_nofs_save();
			kcred = prepare_kernel_cred(NULL);
			memalloc_nofs_restore(nofs_flags);
		}
		rc = -ENOMEM;
		if (!kcred)
			goto out_err_free;
		kcred->fsuid = uid;
		kcred->fsgid = gid;
		cred = RCU_INITIALIZER(kcred);

		if (lgr->range.iomode == IOMODE_READ)
			rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
		else
			rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);

		mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]);
		if (mirror != fls->mirror_array[i]) {
			/* swap cred ptrs so free_mirror will clean up old */
			if (lgr->range.iomode == IOMODE_READ) {
				cred = xchg(&mirror->ro_cred, cred);
				rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
			} else {
				cred = xchg(&mirror->rw_cred, cred);
				rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
			}
			ff_layout_free_mirror(fls->mirror_array[i]);
			fls->mirror_array[i] = mirror;
		}

		dprintk("%s: iomode %s uid %u gid %u\n", __func__,
			lgr->range.iomode == IOMODE_READ ? "READ" : "RW",
			from_kuid(&init_user_ns, uid),
			from_kgid(&init_user_ns, gid));
	}

	p = xdr_inline_decode(&stream, 4);
	if (!p)
		goto out_sort_mirrors;
	fls->flags = be32_to_cpup(p);

	p = xdr_inline_decode(&stream, 4);
	if (!p)
		goto out_sort_mirrors;
	for (i = 0; i < fls->mirror_array_cnt; i++)
		fls->mirror_array[i]->report_interval = be32_to_cpup(p);

out_sort_mirrors:
	ff_layout_sort_mirrors(fls);
	ret = &fls->generic_hdr;
	dprintk("<-- %s (success)\n", __func__);
out_free_page:
	__free_page(scratch);
	return ret;
out_err_free:
	_ff_layout_free_lseg(fls);
	ret = ERR_PTR(rc);
	dprintk("<-- %s (%d)\n", __func__, rc);
	goto out_free_page;
}

static void
ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
{
	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);

	dprintk("--> %s\n", __func__);

	if (lseg->pls_range.iomode == IOMODE_RW) {
		struct nfs4_flexfile_layout *ffl;
		struct inode *inode;

		ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
		inode = ffl->generic_hdr.plh_inode;
		spin_lock(&inode->i_lock);
		pnfs_generic_ds_cinfo_release_lseg(&ffl->commit_info, lseg);
		spin_unlock(&inode->i_lock);
	}
	_ff_layout_free_lseg(fls);
}

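/*
 * Layoutstats accounting. The busy timer tracks how long at least one
 * I/O has been outstanding against a mirror: it records a start time on
 * the 0 -> 1 transition of n_ops and returns the elapsed busy interval
 * when an op completes. nfs4_ff_layoutstat_start_io() also decides
 * whether it is time to ship a LAYOUTSTATS report to the MDS.
 */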
static void
nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
{
	/* first IO request? */
	if (atomic_inc_return(&timer->n_ops) == 1) {
		timer->start_time = now;
	}
}

static ktime_t
nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
{
	ktime_t start;

	if (atomic_dec_return(&timer->n_ops) < 0)
		WARN_ON_ONCE(1);

	start = timer->start_time;
	timer->start_time = now;
	return ktime_sub(now, start);
}

static bool
nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
			    struct nfs4_ff_layoutstat *layoutstat,
			    ktime_t now)
{
	s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;
	struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout);

	nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
	if (!mirror->start_time)
		mirror->start_time = now;
	if (mirror->report_interval != 0)
		report_interval = (s64)mirror->report_interval * 1000LL;
	else if (layoutstats_timer != 0)
		report_interval = (s64)layoutstats_timer * 1000LL;
	if (ktime_to_ms(ktime_sub(now, ffl->last_report_time)) >=
			report_interval) {
		ffl->last_report_time = now;
		return true;
	}

	return false;
}

static void
nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat,
		__u64 requested)
{
	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;

	iostat->ops_requested++;
	iostat->bytes_requested += requested;
}

static void
nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
		__u64 requested,
		__u64 completed,
		ktime_t time_completed,
		ktime_t time_started)
{
	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
	ktime_t completion_time = ktime_sub(time_completed, time_started);
	ktime_t timer;

	iostat->ops_completed++;
	iostat->bytes_completed += completed;
	iostat->bytes_not_delivered += requested - completed;

	timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer, time_completed);
	iostat->total_busy_time =
			ktime_add(iostat->total_busy_time, timer);
	iostat->aggregate_completion_time =
			ktime_add(iostat->aggregate_completion_time,
					completion_time);
}

static void
nfs4_ff_layout_stat_io_start_read(struct inode *inode,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested, ktime_t now)
{
	bool report;

	spin_lock(&mirror->lock);
	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat, now);
	nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested);
	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
	spin_unlock(&mirror->lock);

	if (report)
		pnfs_report_layoutstat(inode, GFP_KERNEL);
}

static void
nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested,
		__u64 completed)
{
	spin_lock(&mirror->lock);
	nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat,
			requested, completed,
			ktime_get(), task->tk_start);
	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
	spin_unlock(&mirror->lock);
}

static void
nfs4_ff_layout_stat_io_start_write(struct inode *inode,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested, ktime_t now)
{
	bool report;

	spin_lock(&mirror->lock);
	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->write_stat, now);
	nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested);
	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
	spin_unlock(&mirror->lock);

	if (report)
		pnfs_report_layoutstat(inode, GFP_NOIO);
}

static void
nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested,
		__u64 completed,
		enum nfs3_stable_how committed)
{
	if (committed == NFS_UNSTABLE)
		requested = completed = 0;

	spin_lock(&mirror->lock);
	nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat,
			requested, completed, ktime_get(), task->tk_start);
	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
	spin_unlock(&mirror->lock);
}

static void
ff_layout_mark_ds_unreachable(struct pnfs_layout_segment *lseg, u32 idx)
{
	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);

	if (devid)
		nfs4_mark_deviceid_unavailable(devid);
}

static void
ff_layout_mark_ds_reachable(struct pnfs_layout_segment *lseg, u32 idx)
{
	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);

	if (devid)
		nfs4_mark_deviceid_available(devid);
}

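/*
 * Data-server selection for reads. Mirrors are kept sorted by
 * efficiency, so walk them in order starting at start_idx. With
 * check_device set, mirrors whose device is marked unavailable are
 * skipped; the "best" variant therefore tries healthy devices first and
 * only then falls back to any mirror it can connect to.
 */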
static struct nfs4_pnfs_ds *
ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg,
			     u32 start_idx, u32 *best_idx,
			     bool check_device)
{
	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_pnfs_ds *ds;
	bool fail_return = false;
	u32 idx;

	/* mirrors are initially sorted by efficiency */
	for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
		if (idx+1 == fls->mirror_array_cnt)
			fail_return = !check_device;

		mirror = FF_LAYOUT_COMP(lseg, idx);
		ds = nfs4_ff_layout_prepare_ds(lseg, mirror, fail_return);
		if (!ds)
			continue;

		if (check_device &&
		    nfs4_test_deviceid_unavailable(&mirror->mirror_ds->id_node))
			continue;

		*best_idx = idx;
		return ds;
	}

	return NULL;
}

static struct nfs4_pnfs_ds *
ff_layout_choose_any_ds_for_read(struct pnfs_layout_segment *lseg,
				 u32 start_idx, u32 *best_idx)
{
	return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, false);
}

static struct nfs4_pnfs_ds *
ff_layout_choose_valid_ds_for_read(struct pnfs_layout_segment *lseg,
				   u32 start_idx, u32 *best_idx)
{
	return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, true);
}

static struct nfs4_pnfs_ds *
ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
				  u32 start_idx, u32 *best_idx)
{
	struct nfs4_pnfs_ds *ds;

	ds = ff_layout_choose_valid_ds_for_read(lseg, start_idx, best_idx);
	if (ds)
		return ds;
	return ff_layout_choose_any_ds_for_read(lseg, start_idx, best_idx);
}

static struct nfs4_pnfs_ds *
ff_layout_get_ds_for_read(struct nfs_pageio_descriptor *pgio,
			  u32 *best_idx)
{
	struct pnfs_layout_segment *lseg = pgio->pg_lseg;
	struct nfs4_pnfs_ds *ds;

	ds = ff_layout_choose_best_ds_for_read(lseg, pgio->pg_mirror_idx,
					       best_idx);
	if (ds || !pgio->pg_mirror_idx)
		return ds;
	return ff_layout_choose_best_ds_for_read(lseg, 0, best_idx);
}

static void
ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio,
		      struct nfs_page *req,
		      bool strict_iomode)
{
	pnfs_put_lseg(pgio->pg_lseg);
	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
					   nfs_req_openctx(req),
					   req_offset(req),
					   req->wb_bytes,
					   IOMODE_READ,
					   strict_iomode,
					   GFP_KERNEL);
	if (IS_ERR(pgio->pg_lseg)) {
		pgio->pg_error = PTR_ERR(pgio->pg_lseg);
		pgio->pg_lseg = NULL;
	}
}

static void
ff_layout_pg_check_layout(struct nfs_pageio_descriptor *pgio,
			  struct nfs_page *req)
{
	pnfs_generic_pg_check_layout(pgio);
	pnfs_generic_pg_check_range(pgio, req);
}

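/*
 * Page I/O setup for reads: get a layout segment if we do not already
 * hold one, re-fetch with strict_iomode when reads should avoid the
 * current RW segment, then bind the descriptor to the chosen mirror.
 * If no DS is usable, either fall back to the MDS or, when the layout
 * forbids that, wait a second and retry the DS path.
 */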
static void
ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
			struct nfs_page *req)
{
	struct nfs_pgio_mirror *pgm;
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_pnfs_ds *ds;
	u32 ds_idx;

retry:
	ff_layout_pg_check_layout(pgio, req);
	/* Use full layout for now */
	if (!pgio->pg_lseg) {
		ff_layout_pg_get_read(pgio, req, false);
		if (!pgio->pg_lseg)
			goto out_nolseg;
	}
	if (ff_layout_avoid_read_on_rw(pgio->pg_lseg)) {
		ff_layout_pg_get_read(pgio, req, true);
		if (!pgio->pg_lseg)
			goto out_nolseg;
	}

	ds = ff_layout_get_ds_for_read(pgio, &ds_idx);
	if (!ds) {
		if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
			goto out_mds;
		pnfs_generic_pg_cleanup(pgio);
		/* Sleep for 1 second before retrying */
		ssleep(1);
		goto retry;
	}

	mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);
	pgm = &pgio->pg_mirrors[0];
	pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;

	pgio->pg_mirror_idx = ds_idx;

	if (NFS_SERVER(pgio->pg_inode)->flags &
			(NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
		pgio->pg_maxretrans = io_maxretrans;
	return;
out_nolseg:
	if (pgio->pg_error < 0)
		return;
out_mds:
	trace_pnfs_mds_fallback_pg_init_read(pgio->pg_inode,
			0, NFS4_MAX_UINT64, IOMODE_READ,
			NFS_I(pgio->pg_inode)->layout,
			pgio->pg_lseg);
	pgio->pg_maxretrans = 0;
	nfs_pageio_reset_read_mds(pgio);
}

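/*
 * Page I/O setup for writes. Unlike reads, a write fans out to every
 * mirror in the segment, so the DS index maps directly onto the pageio
 * mirror index and each mirror must have a usable DS before we proceed.
 */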
static void
ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
			struct nfs_page *req)
{
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs_pgio_mirror *pgm;
	struct nfs4_pnfs_ds *ds;
	u32 i;

retry:
	ff_layout_pg_check_layout(pgio, req);
	if (!pgio->pg_lseg) {
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   nfs_req_openctx(req),
						   req_offset(req),
						   req->wb_bytes,
						   IOMODE_RW,
						   false,
						   GFP_NOFS);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			return;
		}
	}
	/* If no lseg, fall back to write through mds */
	if (pgio->pg_lseg == NULL)
		goto out_mds;

	/* Use a direct mapping of ds_idx to pgio mirror_idx */
	if (pgio->pg_mirror_count != FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg))
		goto out_eagain;

	for (i = 0; i < pgio->pg_mirror_count; i++) {
		mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
		ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, mirror, true);
		if (!ds) {
			if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
				goto out_mds;
			pnfs_generic_pg_cleanup(pgio);
			/* Sleep for 1 second before retrying */
			ssleep(1);
			goto retry;
		}
		pgm = &pgio->pg_mirrors[i];
		pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
	}

	if (NFS_SERVER(pgio->pg_inode)->flags &
			(NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
		pgio->pg_maxretrans = io_maxretrans;
	return;
out_eagain:
	pnfs_generic_pg_cleanup(pgio);
	pgio->pg_error = -EAGAIN;
	return;
out_mds:
	trace_pnfs_mds_fallback_pg_init_write(pgio->pg_inode,
			0, NFS4_MAX_UINT64, IOMODE_RW,
			NFS_I(pgio->pg_inode)->layout,
			pgio->pg_lseg);
	pgio->pg_maxretrans = 0;
	nfs_pageio_reset_write_mds(pgio);
	pgio->pg_error = -EAGAIN;
}

static unsigned int
ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
				    struct nfs_page *req)
{
	if (!pgio->pg_lseg) {
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   nfs_req_openctx(req),
						   req_offset(req),
						   req->wb_bytes,
						   IOMODE_RW,
						   false,
						   GFP_NOFS);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			goto out;
		}
	}
	if (pgio->pg_lseg)
		return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);

	trace_pnfs_mds_fallback_pg_get_mirror_count(pgio->pg_inode,
			0, NFS4_MAX_UINT64, IOMODE_RW,
			NFS_I(pgio->pg_inode)->layout,
			pgio->pg_lseg);
	/* no lseg means that pnfs is not in use, so no mirroring here */
	nfs_pageio_reset_write_mds(pgio);
out:
	return 1;
}

static u32
ff_layout_pg_set_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx)
{
	u32 old = desc->pg_mirror_idx;

	desc->pg_mirror_idx = idx;
	return old;
}

static struct nfs_pgio_mirror *
ff_layout_pg_get_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx)
{
	return &desc->pg_mirrors[idx];
}

static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
	.pg_init = ff_layout_pg_init_read,
	.pg_test = pnfs_generic_pg_test,
	.pg_doio = pnfs_generic_pg_readpages,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
	.pg_init = ff_layout_pg_init_write,
	.pg_test = pnfs_generic_pg_test,
	.pg_doio = pnfs_generic_pg_writepages,
	.pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
	.pg_cleanup = pnfs_generic_pg_cleanup,
	.pg_get_mirror = ff_layout_pg_get_mirror_write,
	.pg_set_mirror = ff_layout_pg_set_mirror_write,
};

static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
{
	struct rpc_task *task = &hdr->task;

	pnfs_layoutcommit_inode(hdr->inode, false);

	if (retry_pnfs) {
		dprintk("%s Reset task %5u for i/o through pNFS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		hdr->completion_ops->reschedule_io(hdr);
		return;
	}

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		dprintk("%s Reset task %5u for i/o through MDS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		trace_pnfs_mds_fallback_write_done(hdr->inode,
				hdr->args.offset, hdr->args.count,
				IOMODE_RW, NFS_I(hdr->inode)->layout,
				hdr->lseg);
		task->tk_status = pnfs_write_done_resend_to_mds(hdr);
	}
}

static void ff_layout_resend_pnfs_read(struct nfs_pgio_header *hdr)
{
	u32 idx = hdr->pgio_mirror_idx + 1;
	u32 new_idx = 0;

	if (ff_layout_choose_any_ds_for_read(hdr->lseg, idx, &new_idx))
		ff_layout_send_layouterror(hdr->lseg);
	else
		pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);
	pnfs_read_resend_pnfs(hdr, new_idx);
}

static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
{
	struct rpc_task *task = &hdr->task;

	pnfs_layoutcommit_inode(hdr->inode, false);
	pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		dprintk("%s Reset task %5u for i/o through MDS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		trace_pnfs_mds_fallback_read_done(hdr->inode,
				hdr->args.offset, hdr->args.count,
				IOMODE_READ, NFS_I(hdr->inode)->layout,
				hdr->lseg);
		task->tk_status = pnfs_read_done_resend_to_mds(hdr);
	}
}

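/*
 * Async RPC error handling. These return -NFS4ERR_RESET_TO_PNFS to
 * retry the I/O against another mirror, -NFS4ERR_RESET_TO_MDS to resend
 * it through the metadata server, or -EAGAIN when the RPC itself should
 * simply be retried; 0 lets the caller complete normally.
 */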
static int ff_layout_async_handle_error_v4(struct rpc_task *task,
					   struct nfs4_state *state,
					   struct nfs_client *clp,
					   struct pnfs_layout_segment *lseg,
					   u32 idx)
{
	struct pnfs_layout_hdr *lo = lseg->pls_layout;
	struct inode *inode = lo->plh_inode;
	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
	struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;

	switch (task->tk_status) {
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_BADSLOT:
	case -NFS4ERR_BAD_HIGH_SLOT:
	case -NFS4ERR_DEADSESSION:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
	case -NFS4ERR_SEQ_FALSE_RETRY:
	case -NFS4ERR_SEQ_MISORDERED:
		dprintk("%s ERROR %d, Reset session. Exchangeid "
			"flags 0x%x\n", __func__, task->tk_status,
			clp->cl_exchange_flags);
		nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
		break;
	case -NFS4ERR_DELAY:
	case -NFS4ERR_GRACE:
		rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
		break;
	case -NFS4ERR_RETRY_UNCACHED_REP:
		break;
	/* Invalidate Layout errors */
	case -NFS4ERR_PNFS_NO_LAYOUT:
	case -ESTALE:           /* mapped NFS4ERR_STALE */
	case -EBADHANDLE:       /* mapped NFS4ERR_BADHANDLE */
	case -EISDIR:           /* mapped NFS4ERR_ISDIR */
	case -NFS4ERR_FHEXPIRED:
	case -NFS4ERR_WRONG_TYPE:
		dprintk("%s Invalid layout error %d\n", __func__,
			task->tk_status);
		/*
		 * Destroy layout so new i/o will get a new layout.
		 * Layout will not be destroyed until all current lseg
		 * references are put. Mark layout as invalid to resend failed
		 * i/o and all i/o waiting on the slot table to the MDS until
		 * layout is destroyed and a new valid layout is obtained.
		 */
		pnfs_destroy_layout(NFS_I(inode));
		rpc_wake_up(&tbl->slot_tbl_waitq);
		goto reset;
	/* RPC connection errors */
	case -ECONNREFUSED:
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
	case -EIO:
	case -ETIMEDOUT:
	case -EPIPE:
		dprintk("%s DS connection error %d\n", __func__,
			task->tk_status);
		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
				&devid->deviceid);
		rpc_wake_up(&tbl->slot_tbl_waitq);
		fallthrough;
	default:
		if (ff_layout_avoid_mds_available_ds(lseg))
			return -NFS4ERR_RESET_TO_PNFS;
reset:
		dprintk("%s Retry through MDS. Error %d\n", __func__,
			task->tk_status);
		return -NFS4ERR_RESET_TO_MDS;
	}
	task->tk_status = 0;
	return -EAGAIN;
}

/* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
static int ff_layout_async_handle_error_v3(struct rpc_task *task,
					   struct pnfs_layout_segment *lseg,
					   u32 idx)
{
	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);

	switch (task->tk_status) {
	/* File access problems. Don't mark the device as unavailable */
	case -EACCES:
	case -ESTALE:
	case -EISDIR:
	case -EBADHANDLE:
	case -ELOOP:
	case -ENOSPC:
		break;
	case -EJUKEBOX:
		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
		goto out_retry;
	default:
		dprintk("%s DS connection error %d\n", __func__,
			task->tk_status);
		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
				&devid->deviceid);
	}
	/* FIXME: Need to prevent infinite looping here. */
	return -NFS4ERR_RESET_TO_PNFS;
out_retry:
	task->tk_status = 0;
	rpc_restart_call_prepare(task);
	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
	return -EAGAIN;
}

static int ff_layout_async_handle_error(struct rpc_task *task,
					struct nfs4_state *state,
					struct nfs_client *clp,
					struct pnfs_layout_segment *lseg,
					u32 idx)
{
	int vers = clp->cl_nfs_mod->rpc_vers->number;

	if (task->tk_status >= 0) {
		ff_layout_mark_ds_reachable(lseg, idx);
		return 0;
	}

	/* Handle the case of an invalid layout segment */
	if (!pnfs_is_valid_lseg(lseg))
		return -NFS4ERR_RESET_TO_PNFS;

	switch (vers) {
	case 3:
		return ff_layout_async_handle_error_v3(task, lseg, idx);
	case 4:
		return ff_layout_async_handle_error_v4(task, state, clp,
						       lseg, idx);
	default:
		/* should never happen */
		WARN_ON_ONCE(1);
		return 0;
	}
}

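/*
 * Record a DS error for later LAYOUTERROR/LAYOUTRETURN reporting. Local
 * errnos from failed RPCs are folded into NFS4ERR_NXIO (or
 * NFS4ERR_ACCESS for -EACCES) before being queued on the layout's error
 * list. NXIO also marks the device unreachable; reads keep the layout
 * so the remaining mirrors can be tried, while other ops mark it for
 * return.
 */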
static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
					u32 idx, u64 offset, u64 length,
					u32 *op_status, int opnum, int error)
{
	struct nfs4_ff_layout_mirror *mirror;
	u32 status = *op_status;
	int err;

	if (status == 0) {
		switch (error) {
		case -ETIMEDOUT:
		case -EPFNOSUPPORT:
		case -EPROTONOSUPPORT:
		case -EOPNOTSUPP:
		case -EINVAL:
		case -ECONNREFUSED:
		case -ECONNRESET:
		case -EHOSTDOWN:
		case -EHOSTUNREACH:
		case -ENETUNREACH:
		case -EADDRINUSE:
		case -ENOBUFS:
		case -EPIPE:
		case -EPERM:
			*op_status = status = NFS4ERR_NXIO;
			break;
		case -EACCES:
			*op_status = status = NFS4ERR_ACCESS;
			break;
		default:
			return;
		}
	}

	mirror = FF_LAYOUT_COMP(lseg, idx);
	err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
				       mirror, offset, length, status, opnum,
				       GFP_NOIO);

	switch (status) {
	case NFS4ERR_DELAY:
	case NFS4ERR_GRACE:
		break;
	case NFS4ERR_NXIO:
		ff_layout_mark_ds_unreachable(lseg, idx);
		/*
		 * Don't return the layout if this is a read and we still
		 * have layouts to try
		 */
		if (opnum == OP_READ)
			break;
		fallthrough;
	default:
		pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode,
						  lseg);
	}

	dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
}

/* NFS_PROTO call done callback routines */
static int ff_layout_read_done_cb(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	int err;

	if (task->tk_status < 0) {
		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
					    hdr->args.offset, hdr->args.count,
					    &hdr->res.op_status, OP_READ,
					    task->tk_status);
		trace_ff_layout_read_error(hdr);
	}

	err = ff_layout_async_handle_error(task, hdr->args.context->state,
					   hdr->ds_clp, hdr->lseg,
					   hdr->pgio_mirror_idx);

	trace_nfs4_pnfs_read(hdr, err);
	clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
	clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
	switch (err) {
	case -NFS4ERR_RESET_TO_PNFS:
		set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
		return task->tk_status;
	case -NFS4ERR_RESET_TO_MDS:
		set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
		return task->tk_status;
	case -EAGAIN:
		goto out_eagain;
	}

	return 0;
out_eagain:
	rpc_restart_call_prepare(task);
	return -EAGAIN;
}

static bool
ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg)
{
	return !(FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_LAYOUTCOMMIT);
}

/*
 * We reference the rpc_cred of the first WRITE that triggers the need for
 * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
 * rfc5661 is not clear about which credential should be used.
 *
 * The flexfiles client should treat a FILE_SYNC reply from a DS as
 * DATA_SYNC; following
 * http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751
 * we always send a layoutcommit after DS writes.
 */
static void
ff_layout_set_layoutcommit(struct inode *inode,
		struct pnfs_layout_segment *lseg,
		loff_t end_offset)
{
	if (!ff_layout_need_layoutcommit(lseg))
		return;

	pnfs_set_layoutcommit(inode, lseg, end_offset);
	dprintk("%s inode %lu pls_end_pos %llu\n", __func__, inode->i_ino,
		(unsigned long long) NFS_I(inode)->layout->plh_lwb);
}

static void ff_layout_read_record_layoutstats_start(struct rpc_task *task,
		struct nfs_pgio_header *hdr)
{
	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
		return;
	nfs4_ff_layout_stat_io_start_read(hdr->inode,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count,
			task->tk_start);
}

static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
		struct nfs_pgio_header *hdr)
{
	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
		return;
	nfs4_ff_layout_stat_io_end_read(task,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count,
			hdr->res.count);
	set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
}

static int ff_layout_read_prepare_common(struct rpc_task *task,
					 struct nfs_pgio_header *hdr)
{
	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
		rpc_exit(task, -EIO);
		return -EIO;
	}

	ff_layout_read_record_layoutstats_start(task, hdr);
	return 0;
}

/*
 * Call ops for the async read/write cases
 * In the case of dense layouts, the offset needs to be reset to its
 * original value.
 */
static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (ff_layout_read_prepare_common(task, hdr))
		return;

	rpc_call_start(task);
}

static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (nfs4_setup_sequence(hdr->ds_clp,
				&hdr->args.seq_args,
				&hdr->res.seq_res,
				task))
		return;

	ff_layout_read_prepare_common(task, hdr);
}

static void ff_layout_read_call_done(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
	    task->tk_status == 0) {
		nfs4_sequence_done(task, &hdr->res.seq_res);
		return;
	}

	/* Note this may cause RPC to be resent */
	hdr->mds_ops->rpc_call_done(task, hdr);
}

static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	ff_layout_read_record_layoutstats_done(task, hdr);
	rpc_count_iostats_metrics(task,
	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
}

static void ff_layout_read_release(void *data)
{
	struct nfs_pgio_header *hdr = data;

	ff_layout_read_record_layoutstats_done(&hdr->task, hdr);
	if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags))
		ff_layout_resend_pnfs_read(hdr);
	else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
		ff_layout_reset_read(hdr);
	pnfs_generic_rw_release(data);
}


ff_layout_write_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr)1456 static int ff_layout_write_done_cb(struct rpc_task *task,
1457 				struct nfs_pgio_header *hdr)
1458 {
1459 	loff_t end_offs = 0;
1460 	int err;
1461 
1462 	if (task->tk_status < 0) {
1463 		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
1464 					    hdr->args.offset, hdr->args.count,
1465 					    &hdr->res.op_status, OP_WRITE,
1466 					    task->tk_status);
1467 		trace_ff_layout_write_error(hdr);
1468 	}
1469 
1470 	err = ff_layout_async_handle_error(task, hdr->args.context->state,
1471 					   hdr->ds_clp, hdr->lseg,
1472 					   hdr->pgio_mirror_idx);
1473 
1474 	trace_nfs4_pnfs_write(hdr, err);
1475 	clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1476 	clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1477 	switch (err) {
1478 	case -NFS4ERR_RESET_TO_PNFS:
1479 		set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1480 		return task->tk_status;
1481 	case -NFS4ERR_RESET_TO_MDS:
1482 		set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1483 		return task->tk_status;
1484 	case -EAGAIN:
1485 		return -EAGAIN;
1486 	}
1487 
1488 	if (hdr->res.verf->committed == NFS_FILE_SYNC ||
1489 	    hdr->res.verf->committed == NFS_DATA_SYNC)
1490 		end_offs = hdr->mds_offset + (loff_t)hdr->res.count;
1491 
1492 	/* Note: if the write is unstable, don't set end_offs until commit */
1493 	ff_layout_set_layoutcommit(hdr->inode, hdr->lseg, end_offs);
1494 
1495 	/* zero out fattr since we don't care DS attr at all */
1496 	hdr->fattr.valid = 0;
1497 	if (task->tk_status >= 0)
1498 		nfs_writeback_update_inode(hdr);
1499 
1500 	return 0;
1501 }
1502 
ff_layout_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data)1503 static int ff_layout_commit_done_cb(struct rpc_task *task,
1504 				     struct nfs_commit_data *data)
1505 {
1506 	int err;
1507 
1508 	if (task->tk_status < 0) {
1509 		ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
1510 					    data->args.offset, data->args.count,
1511 					    &data->res.op_status, OP_COMMIT,
1512 					    task->tk_status);
1513 		trace_ff_layout_commit_error(data);
1514 	}
1515 
1516 	err = ff_layout_async_handle_error(task, NULL, data->ds_clp,
1517 					   data->lseg, data->ds_commit_index);
1518 
1519 	trace_nfs4_pnfs_commit_ds(data, err);
1520 	switch (err) {
1521 	case -NFS4ERR_RESET_TO_PNFS:
1522 		pnfs_generic_prepare_to_resend_writes(data);
1523 		return -EAGAIN;
1524 	case -NFS4ERR_RESET_TO_MDS:
1525 		pnfs_generic_prepare_to_resend_writes(data);
1526 		return -EAGAIN;
1527 	case -EAGAIN:
1528 		rpc_restart_call_prepare(task);
1529 		return -EAGAIN;
1530 	}
1531 
1532 	ff_layout_set_layoutcommit(data->inode, data->lseg, data->lwb);
1533 
1534 	return 0;
1535 }
1536 
ff_layout_write_record_layoutstats_start(struct rpc_task *task, struct nfs_pgio_header *hdr)1537 static void ff_layout_write_record_layoutstats_start(struct rpc_task *task,
1538 		struct nfs_pgio_header *hdr)
1539 {
1540 	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
1541 		return;
1542 	nfs4_ff_layout_stat_io_start_write(hdr->inode,
1543 			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1544 			hdr->args.count,
1545 			task->tk_start);
1546 }
1547 
ff_layout_write_record_layoutstats_done(struct rpc_task *task, struct nfs_pgio_header *hdr)1548 static void ff_layout_write_record_layoutstats_done(struct rpc_task *task,
1549 		struct nfs_pgio_header *hdr)
1550 {
1551 	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
1552 		return;
1553 	nfs4_ff_layout_stat_io_end_write(task,
1554 			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1555 			hdr->args.count, hdr->res.count,
1556 			hdr->res.verf->committed);
1557 	set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
1558 }
1559 
ff_layout_write_prepare_common(struct rpc_task *task, struct nfs_pgio_header *hdr)1560 static int ff_layout_write_prepare_common(struct rpc_task *task,
1561 					  struct nfs_pgio_header *hdr)
1562 {
1563 	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1564 		rpc_exit(task, -EIO);
1565 		return -EIO;
1566 	}
1567 
1568 	ff_layout_write_record_layoutstats_start(task, hdr);
1569 	return 0;
1570 }
1571 
ff_layout_write_prepare_v3(struct rpc_task *task, void *data)1572 static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
1573 {
1574 	struct nfs_pgio_header *hdr = data;
1575 
1576 	if (ff_layout_write_prepare_common(task, hdr))
1577 		return;
1578 
1579 	rpc_call_start(task);
1580 }
1581 
static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (nfs4_setup_sequence(hdr->ds_clp,
				&hdr->args.seq_args,
				&hdr->res.seq_res,
				task))
		return;

	ff_layout_write_prepare_common(task, hdr);
}

static void ff_layout_write_call_done(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
	    task->tk_status == 0) {
		nfs4_sequence_done(task, &hdr->res.seq_res);
		return;
	}

	/* Note this may cause RPC to be resent */
	hdr->mds_ops->rpc_call_done(task, hdr);
}

static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	ff_layout_write_record_layoutstats_done(task, hdr);
	rpc_count_iostats_metrics(task,
	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
}

static void ff_layout_write_release(void *data)
{
	struct nfs_pgio_header *hdr = data;

	ff_layout_write_record_layoutstats_done(&hdr->task, hdr);
	if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags)) {
		ff_layout_send_layouterror(hdr->lseg);
		ff_layout_reset_write(hdr, true);
	} else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
		ff_layout_reset_write(hdr, false);
	pnfs_generic_rw_release(data);
}

static void ff_layout_commit_record_layoutstats_start(struct rpc_task *task,
		struct nfs_commit_data *cdata)
{
	if (test_and_set_bit(NFS_IOHDR_STAT, &cdata->flags))
		return;
	nfs4_ff_layout_stat_io_start_write(cdata->inode,
			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
			0, task->tk_start);
}

static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task,
		struct nfs_commit_data *cdata)
{
	struct nfs_page *req;
	__u64 count = 0;

	if (!test_and_clear_bit(NFS_IOHDR_STAT, &cdata->flags))
		return;

	if (task->tk_status == 0) {
		list_for_each_entry(req, &cdata->pages, wb_list)
			count += req->wb_bytes;
	}
	nfs4_ff_layout_stat_io_end_write(task,
			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
			count, count, NFS_FILE_SYNC);
	set_bit(NFS_LSEG_LAYOUTRETURN, &cdata->lseg->pls_flags);
}

static void ff_layout_commit_prepare_common(struct rpc_task *task,
		struct nfs_commit_data *cdata)
{
	ff_layout_commit_record_layoutstats_start(task, cdata);
}

static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
{
	ff_layout_commit_prepare_common(task, data);
	rpc_call_start(task);
}

static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
{
	struct nfs_commit_data *wdata = data;

	if (nfs4_setup_sequence(wdata->ds_clp,
				&wdata->args.seq_args,
				&wdata->res.seq_res,
				task))
		return;
	ff_layout_commit_prepare_common(task, data);
}

static void ff_layout_commit_done(struct rpc_task *task, void *data)
{
	pnfs_generic_write_commit_done(task, data);
}

static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_commit_data *cdata = data;

	ff_layout_commit_record_layoutstats_done(task, cdata);
	rpc_count_iostats_metrics(task,
	    &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
}

static void ff_layout_commit_release(void *data)
{
	struct nfs_commit_data *cdata = data;

	ff_layout_commit_record_layoutstats_done(&cdata->task, cdata);
	pnfs_generic_commit_release(data);
}

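/*
 * RPC callback vectors for read, write and commit, in NFSv3 and NFSv4
 * flavours.  The v4 variants additionally drive the session sequencing
 * state machine in their ->rpc_call_prepare methods.
 */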
static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
	.rpc_call_prepare = ff_layout_read_prepare_v3,
	.rpc_call_done = ff_layout_read_call_done,
	.rpc_count_stats = ff_layout_read_count_stats,
	.rpc_release = ff_layout_read_release,
};

static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
	.rpc_call_prepare = ff_layout_read_prepare_v4,
	.rpc_call_done = ff_layout_read_call_done,
	.rpc_count_stats = ff_layout_read_count_stats,
	.rpc_release = ff_layout_read_release,
};

static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
	.rpc_call_prepare = ff_layout_write_prepare_v3,
	.rpc_call_done = ff_layout_write_call_done,
	.rpc_count_stats = ff_layout_write_count_stats,
	.rpc_release = ff_layout_write_release,
};

static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
	.rpc_call_prepare = ff_layout_write_prepare_v4,
	.rpc_call_done = ff_layout_write_call_done,
	.rpc_count_stats = ff_layout_write_count_stats,
	.rpc_release = ff_layout_write_release,
};

static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
	.rpc_call_prepare = ff_layout_commit_prepare_v3,
	.rpc_call_done = ff_layout_commit_done,
	.rpc_count_stats = ff_layout_commit_count_stats,
	.rpc_release = ff_layout_commit_release,
};

static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
	.rpc_call_prepare = ff_layout_commit_prepare_v4,
	.rpc_call_done = ff_layout_commit_done,
	.rpc_count_stats = ff_layout_commit_count_stats,
	.rpc_release = ff_layout_commit_release,
};

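/*
 * Send an asynchronous READ to the data server selected for this
 * mirror.  On any setup failure, ask the caller to retry the pNFS
 * path if another usable data server remains, otherwise fall back
 * to I/O through the MDS.
 */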
static enum pnfs_try_status
ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
{
	struct pnfs_layout_segment *lseg = hdr->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	struct nfs4_ff_layout_mirror *mirror;
	const struct cred *ds_cred;
	loff_t offset = hdr->args.offset;
	u32 idx = hdr->pgio_mirror_idx;
	int vers;
	struct nfs_fh *fh;

	dprintk("--> %s ino %lu pgbase %u req %zu@%llu\n",
		__func__, hdr->inode->i_ino,
		hdr->args.pgbase, (size_t)hdr->args.count, offset);

	mirror = FF_LAYOUT_COMP(lseg, idx);
	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
	if (!ds)
		goto out_failed;

	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
						   hdr->inode);
	if (IS_ERR(ds_clnt))
		goto out_failed;

	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
	if (!ds_cred)
		goto out_failed;

	vers = nfs4_ff_layout_ds_version(mirror);

	dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
		ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count), vers);

	hdr->pgio_done_cb = ff_layout_read_done_cb;
	refcount_inc(&ds->ds_clp->cl_count);
	hdr->ds_clp = ds->ds_clp;
	fh = nfs4_ff_layout_select_ds_fh(mirror);
	if (fh)
		hdr->args.fh = fh;

	nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);

	/*
	 * Note that if we ever decide to split across DSes,
	 * then we may need to handle dense-like offsets.
	 */
	hdr->args.offset = offset;
	hdr->mds_offset = offset;

	/* Perform an asynchronous read to ds */
	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
			  vers == 3 ? &ff_layout_read_call_ops_v3 :
				      &ff_layout_read_call_ops_v4,
			  0, RPC_TASK_SOFTCONN);
	put_cred(ds_cred);
	return PNFS_ATTEMPTED;

out_failed:
	if (ff_layout_avoid_mds_available_ds(lseg))
		return PNFS_TRY_AGAIN;
	trace_pnfs_mds_fallback_read_pagelist(hdr->inode,
			hdr->args.offset, hdr->args.count,
			IOMODE_READ, NFS_I(hdr->inode)->layout, lseg);
	return PNFS_NOT_ATTEMPTED;
}

/* Perform async writes. */
static enum pnfs_try_status
ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
{
	struct pnfs_layout_segment *lseg = hdr->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	struct nfs4_ff_layout_mirror *mirror;
	const struct cred *ds_cred;
	loff_t offset = hdr->args.offset;
	int vers;
	struct nfs_fh *fh;
	u32 idx = hdr->pgio_mirror_idx;

	mirror = FF_LAYOUT_COMP(lseg, idx);
	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
	if (!ds)
		goto out_failed;

	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
						   hdr->inode);
	if (IS_ERR(ds_clnt))
		goto out_failed;

	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
	if (!ds_cred)
		goto out_failed;

	vers = nfs4_ff_layout_ds_version(mirror);

	dprintk("%s ino %lu sync %d req %zu@%llu DS: %s cl_count %d vers %d\n",
		__func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
		offset, ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count),
		vers);

	hdr->pgio_done_cb = ff_layout_write_done_cb;
	refcount_inc(&ds->ds_clp->cl_count);
	hdr->ds_clp = ds->ds_clp;
	hdr->ds_commit_idx = idx;
	fh = nfs4_ff_layout_select_ds_fh(mirror);
	if (fh)
		hdr->args.fh = fh;

	nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);

	/*
	 * Note that if we ever decide to split across DSes,
	 * then we may need to handle dense-like offsets.
	 */
	hdr->args.offset = offset;

	/* Perform an asynchronous write */
	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
			  vers == 3 ? &ff_layout_write_call_ops_v3 :
				      &ff_layout_write_call_ops_v4,
			  sync, RPC_TASK_SOFTCONN);
	put_cred(ds_cred);
	return PNFS_ATTEMPTED;

out_failed:
	if (ff_layout_avoid_mds_available_ds(lseg))
		return PNFS_TRY_AGAIN;
	trace_pnfs_mds_fallback_write_pagelist(hdr->inode,
			hdr->args.offset, hdr->args.count,
			IOMODE_RW, NFS_I(hdr->inode)->layout, lseg);
	return PNFS_NOT_ATTEMPTED;
}

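/*
 * In the flexfile layout a commit bucket maps 1:1 onto a mirror index
 * (see the ds_commit_idx assignment in ff_layout_write_pagelist), so
 * no translation is needed here.
 */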
static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
{
	return i;
}

static struct nfs_fh *
select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
{
	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);

	/* FIXME: Assume that there is only one NFS version available
	 * for the DS.
	 */
	return &flseg->mirror_array[i]->fh_versions[0];
}

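/*
 * Send a COMMIT to the data server that holds the bucket's writes.
 * If the layout segment is no longer usable, hand the requests back
 * for a resend through the MDS and return -EAGAIN.
 */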
static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
{
	struct pnfs_layout_segment *lseg = data->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	struct nfs4_ff_layout_mirror *mirror;
	const struct cred *ds_cred;
	u32 idx;
	int vers, ret;
	struct nfs_fh *fh;

	if (!lseg || !(pnfs_is_valid_lseg(lseg) ||
	    test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)))
		goto out_err;

	idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
	mirror = FF_LAYOUT_COMP(lseg, idx);
	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
	if (!ds)
		goto out_err;

	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
						   data->inode);
	if (IS_ERR(ds_clnt))
		goto out_err;

	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, data->cred);
	if (!ds_cred)
		goto out_err;

	vers = nfs4_ff_layout_ds_version(mirror);

	dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
		data->inode->i_ino, how, refcount_read(&ds->ds_clp->cl_count),
		vers);
	data->commit_done_cb = ff_layout_commit_done_cb;
	data->cred = ds_cred;
	refcount_inc(&ds->ds_clp->cl_count);
	data->ds_clp = ds->ds_clp;
	fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
	if (fh)
		data->args.fh = fh;

	ret = nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
				   vers == 3 ? &ff_layout_commit_call_ops_v3 :
					       &ff_layout_commit_call_ops_v4,
				   how, RPC_TASK_SOFTCONN);
	put_cred(ds_cred);
	return ret;
out_err:
	pnfs_generic_prepare_to_resend_writes(data);
	pnfs_generic_commit_release(data);
	return -EAGAIN;
}

static int
ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
			   int how, struct nfs_commit_info *cinfo)
{
	return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
					    ff_layout_initiate_commit);
}

static struct pnfs_ds_commit_info *
ff_layout_get_ds_info(struct inode *inode)
{
	struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;

	if (layout == NULL)
		return NULL;

	return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
}

static void
ff_layout_setup_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
		struct pnfs_layout_segment *lseg)
{
	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
	struct inode *inode = lseg->pls_layout->plh_inode;
	struct pnfs_commit_array *array, *new;

	new = pnfs_alloc_commit_array(flseg->mirror_array_cnt, GFP_NOIO);
	if (new) {
		spin_lock(&inode->i_lock);
		array = pnfs_add_commit_array(fl_cinfo, new, lseg);
		spin_unlock(&inode->i_lock);
		if (array != new)
			pnfs_free_commit_array(new);
	}
}

static void
ff_layout_release_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
		struct inode *inode)
{
	spin_lock(&inode->i_lock);
	pnfs_generic_ds_cinfo_destroy(fl_cinfo);
	spin_unlock(&inode->i_lock);
}

static void
ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
{
	nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
						  id_node));
}

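/*
 * Encode the ioerr array for LAYOUTRETURN: an error count followed by
 * the queued data-server I/O errors for this layout.
 */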
static int ff_layout_encode_ioerr(struct xdr_stream *xdr,
				  const struct nfs4_layoutreturn_args *args,
				  const struct nfs4_flexfile_layoutreturn_args *ff_args)
{
	__be32 *start;

	start = xdr_reserve_space(xdr, 4);
	if (unlikely(!start))
		return -E2BIG;

	*start = cpu_to_be32(ff_args->num_errors);
	/* This assumes we always return _ALL_ layouts */
	return ff_layout_encode_ds_ioerr(xdr, &ff_args->errors);
}

static void
encode_opaque_fixed(struct xdr_stream *xdr, const void *buf, size_t len)
{
	WARN_ON_ONCE(xdr_stream_encode_opaque_fixed(xdr, buf, len) < 0);
}

static void
ff_layout_encode_ff_iostat_head(struct xdr_stream *xdr,
			    const nfs4_stateid *stateid,
			    const struct nfs42_layoutstat_devinfo *devinfo)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 8 + 8);
	p = xdr_encode_hyper(p, devinfo->offset);
	p = xdr_encode_hyper(p, devinfo->length);
	encode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE);
	p = xdr_reserve_space(xdr, 4*8);
	p = xdr_encode_hyper(p, devinfo->read_count);
	p = xdr_encode_hyper(p, devinfo->read_bytes);
	p = xdr_encode_hyper(p, devinfo->write_count);
	p = xdr_encode_hyper(p, devinfo->write_bytes);
	encode_opaque_fixed(xdr, devinfo->dev_id.data, NFS4_DEVICEID4_SIZE);
}

static void
ff_layout_encode_ff_iostat(struct xdr_stream *xdr,
			    const nfs4_stateid *stateid,
			    const struct nfs42_layoutstat_devinfo *devinfo)
{
	ff_layout_encode_ff_iostat_head(xdr, stateid, devinfo);
	ff_layout_encode_ff_layoutupdate(xdr, devinfo,
			devinfo->ld_private.data);
}

/* Encode the per-device iostats that accompany a LAYOUTRETURN */
static void ff_layout_encode_iostats_array(struct xdr_stream *xdr,
		const struct nfs4_layoutreturn_args *args,
		struct nfs4_flexfile_layoutreturn_args *ff_args)
{
	__be32 *p;
	int i;

	p = xdr_reserve_space(xdr, 4);
	*p = cpu_to_be32(ff_args->num_dev);
	for (i = 0; i < ff_args->num_dev; i++)
		ff_layout_encode_ff_iostat(xdr,
				&args->layout->plh_stateid,
				&ff_args->devinfo[i]);
}

static void
ff_layout_free_iostats_array(struct nfs42_layoutstat_devinfo *devinfo,
		unsigned int num_entries)
{
	unsigned int i;

	for (i = 0; i < num_entries; i++) {
		if (!devinfo[i].ld_private.ops)
			continue;
		if (!devinfo[i].ld_private.ops->free)
			continue;
		devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
	}
}

static struct nfs4_deviceid_node *
ff_layout_alloc_deviceid_node(struct nfs_server *server,
			      struct pnfs_device *pdev, gfp_t gfp_flags)
{
	struct nfs4_ff_layout_ds *dsaddr;

	dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
	if (!dsaddr)
		return NULL;
	return &dsaddr->id_node;
}

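/*
 * The flexfile-private LAYOUTRETURN payload is first encoded into a
 * scratch page so that its total length is known, then spliced into
 * the outer XDR stream as a single length-prefixed opaque body.
 */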
static void
ff_layout_encode_layoutreturn(struct xdr_stream *xdr,
		const void *voidargs,
		const struct nfs4_xdr_opaque_data *ff_opaque)
{
	const struct nfs4_layoutreturn_args *args = voidargs;
	struct nfs4_flexfile_layoutreturn_args *ff_args = ff_opaque->data;
	struct xdr_buf tmp_buf = {
		.head = {
			[0] = {
				.iov_base = page_address(ff_args->pages[0]),
			},
		},
		.buflen = PAGE_SIZE,
	};
	struct xdr_stream tmp_xdr;
	__be32 *start;

	dprintk("%s: Begin\n", __func__);

	xdr_init_encode(&tmp_xdr, &tmp_buf, NULL, NULL);

	ff_layout_encode_ioerr(&tmp_xdr, args, ff_args);
	ff_layout_encode_iostats_array(&tmp_xdr, args, ff_args);

	start = xdr_reserve_space(xdr, 4);
	*start = cpu_to_be32(tmp_buf.len);
	xdr_write_pages(xdr, ff_args->pages, 0, tmp_buf.len);

	dprintk("%s: Return\n", __func__);
}

static void
ff_layout_free_layoutreturn(struct nfs4_xdr_opaque_data *args)
{
	struct nfs4_flexfile_layoutreturn_args *ff_args;

	if (!args->data)
		return;
	ff_args = args->data;
	args->data = NULL;

	ff_layout_free_ds_ioerr(&ff_args->errors);
	ff_layout_free_iostats_array(ff_args->devinfo, ff_args->num_dev);

	put_page(ff_args->pages[0]);
	kfree(ff_args);
}

static const struct nfs4_xdr_opaque_ops layoutreturn_ops = {
	.encode = ff_layout_encode_layoutreturn,
	.free = ff_layout_free_layoutreturn,
};

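/*
 * Gather the queued data-server errors and per-mirror statistics that
 * will ride along with the LAYOUTRETURN, and hang them off the
 * layout-driver private data for layoutreturn_ops to encode and free.
 */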
static int
ff_layout_prepare_layoutreturn(struct nfs4_layoutreturn_args *args)
{
	struct nfs4_flexfile_layoutreturn_args *ff_args;
	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(args->layout);

	ff_args = kmalloc(sizeof(*ff_args), GFP_KERNEL);
	if (!ff_args)
		goto out_nomem;
	ff_args->pages[0] = alloc_page(GFP_KERNEL);
	if (!ff_args->pages[0])
		goto out_nomem_free;

	INIT_LIST_HEAD(&ff_args->errors);
	ff_args->num_errors = ff_layout_fetch_ds_ioerr(args->layout,
			&args->range, &ff_args->errors,
			FF_LAYOUTRETURN_MAXERR);

	spin_lock(&args->inode->i_lock);
	ff_args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
			&ff_args->devinfo[0], ARRAY_SIZE(ff_args->devinfo));
	spin_unlock(&args->inode->i_lock);

	args->ld_private->ops = &layoutreturn_ops;
	args->ld_private->data = ff_args;
	return 0;
out_nomem_free:
	kfree(ff_args);
out_nomem:
	return -ENOMEM;
}

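/*
 * Report accumulated data-server I/O errors to the MDS via
 * LAYOUTERROR, batching at most NFS42_LAYOUTERROR_MAX entries per
 * call.  Stubbed out when CONFIG_NFS_V4_2 is not enabled.
 */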
#ifdef CONFIG_NFS_V4_2
void
ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_hdr *lo = lseg->pls_layout;
	struct nfs42_layout_error *errors;
	LIST_HEAD(head);

	if (!nfs_server_capable(lo->plh_inode, NFS_CAP_LAYOUTERROR))
		return;
	ff_layout_fetch_ds_ioerr(lo, &lseg->pls_range, &head, -1);
	if (list_empty(&head))
		return;

	errors = kmalloc_array(NFS42_LAYOUTERROR_MAX,
			sizeof(*errors), GFP_NOFS);
	if (errors != NULL) {
		const struct nfs4_ff_layout_ds_err *pos;
		size_t n = 0;

		list_for_each_entry(pos, &head, list) {
			errors[n].offset = pos->offset;
			errors[n].length = pos->length;
			nfs4_stateid_copy(&errors[n].stateid, &pos->stateid);
			errors[n].errors[0].dev_id = pos->deviceid;
			errors[n].errors[0].status = pos->status;
			errors[n].errors[0].opnum = pos->opnum;
			n++;
			if (!list_is_last(&pos->list, &head) &&
			    n < NFS42_LAYOUTERROR_MAX)
				continue;
			if (nfs42_proc_layouterror(lseg, errors, n) < 0)
				break;
			n = 0;
		}
		kfree(errors);
	}
	ff_layout_free_ds_ioerr(&head);
}
#else
void
ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
{
}
#endif

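/*
 * Helpers that render a data-server socket address in the universal
 * address (uaddr) presentation format used by netaddr4.
 */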
static int
ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen)
{
	const struct sockaddr_in *sin = (struct sockaddr_in *)sap;

	return snprintf(buf, buflen, "%pI4", &sin->sin_addr);
}

static size_t
ff_layout_ntop6_noscopeid(const struct sockaddr *sap, char *buf,
			  const int buflen)
{
	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
	const struct in6_addr *addr = &sin6->sin6_addr;

	/*
	 * RFC 4291, Section 2.2.2
	 *
	 * Shorthanded ANY address
	 */
	if (ipv6_addr_any(addr))
		return snprintf(buf, buflen, "::");

	/*
	 * RFC 4291, Section 2.2.2
	 *
	 * Shorthanded loopback address
	 */
	if (ipv6_addr_loopback(addr))
		return snprintf(buf, buflen, "::1");

	/*
	 * RFC 4291, Section 2.2.3
	 *
	 * Special presentation address format for mapped v4
	 * addresses.
	 */
	if (ipv6_addr_v4mapped(addr))
		return snprintf(buf, buflen, "::ffff:%pI4",
					&addr->s6_addr32[3]);

	/*
	 * RFC 4291, Section 2.2.1
	 */
	return snprintf(buf, buflen, "%pI6c", addr);
}

/* Derived from rpc_sockaddr2uaddr */
static void
ff_layout_encode_netaddr(struct xdr_stream *xdr, struct nfs4_pnfs_ds_addr *da)
{
	struct sockaddr *sap = (struct sockaddr *)&da->da_addr;
	char portbuf[RPCBIND_MAXUADDRPLEN];
	char addrbuf[RPCBIND_MAXUADDRLEN];
	char *netid;
	unsigned short port;
	int len, netid_len;
	__be32 *p;

	switch (sap->sa_family) {
	case AF_INET:
		if (ff_layout_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0)
			return;
		port = ntohs(((struct sockaddr_in *)sap)->sin_port);
		netid = "tcp";
		netid_len = 3;
		break;
	case AF_INET6:
		if (ff_layout_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0)
			return;
		port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
		netid = "tcp6";
		netid_len = 4;
		break;
	default:
		/* we only support tcp and tcp6 */
		WARN_ON_ONCE(1);
		return;
	}

	snprintf(portbuf, sizeof(portbuf), ".%u.%u", port >> 8, port & 0xff);
	len = strlcat(addrbuf, portbuf, sizeof(addrbuf));

	p = xdr_reserve_space(xdr, 4 + netid_len);
	xdr_encode_opaque(p, netid, netid_len);

	p = xdr_reserve_space(xdr, 4 + len);
	xdr_encode_opaque(p, addrbuf, len);
}

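/* Encode an nfstime4: 64-bit seconds followed by 32-bit nanoseconds */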
static void
ff_layout_encode_nfstime(struct xdr_stream *xdr,
			 ktime_t t)
{
	struct timespec64 ts;
	__be32 *p;

	p = xdr_reserve_space(xdr, 12);
	ts = ktime_to_timespec64(t);
	p = xdr_encode_hyper(p, ts.tv_sec);
	*p++ = cpu_to_be32(ts.tv_nsec);
}

static void
ff_layout_encode_io_latency(struct xdr_stream *xdr,
			    struct nfs4_ff_io_stat *stat)
{
	__be32 *p;

	p = xdr_reserve_space(xdr, 5 * 8);
	p = xdr_encode_hyper(p, stat->ops_requested);
	p = xdr_encode_hyper(p, stat->bytes_requested);
	p = xdr_encode_hyper(p, stat->ops_completed);
	p = xdr_encode_hyper(p, stat->bytes_completed);
	p = xdr_encode_hyper(p, stat->bytes_not_delivered);
	ff_layout_encode_nfstime(xdr, stat->total_busy_time);
	ff_layout_encode_nfstime(xdr, stat->aggregate_completion_time);
}

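/*
 * Encode one ff_layoutupdate body for a mirror: the data server's
 * netaddr and filehandle, the read and write latency counters
 * (sampled under the mirror lock), the time elapsed since the
 * mirror's start_time, and a trailing boolean that is always
 * encoded as false.
 */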
static void
ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
			      const struct nfs42_layoutstat_devinfo *devinfo,
			      struct nfs4_ff_layout_mirror *mirror)
{
	struct nfs4_pnfs_ds_addr *da;
	struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds;
	struct nfs_fh *fh = &mirror->fh_versions[0];
	__be32 *p;

	da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
	dprintk("%s: DS %s: encoding address %s\n",
		__func__, ds->ds_remotestr, da->da_remotestr);
	/* netaddr4 */
	ff_layout_encode_netaddr(xdr, da);
	/* nfs_fh4 */
	p = xdr_reserve_space(xdr, 4 + fh->size);
	xdr_encode_opaque(p, fh->data, fh->size);
	/* ff_io_latency4 read */
	spin_lock(&mirror->lock);
	ff_layout_encode_io_latency(xdr, &mirror->read_stat.io_stat);
	/* ff_io_latency4 write */
	ff_layout_encode_io_latency(xdr, &mirror->write_stat.io_stat);
	spin_unlock(&mirror->lock);
	/* nfstime4 */
	ff_layout_encode_nfstime(xdr, ktime_sub(ktime_get(), mirror->start_time));
	/* bool */
	p = xdr_reserve_space(xdr, 4);
	*p = cpu_to_be32(false);
}

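/*
 * Encode the layoutupdate blob for LAYOUTSTATS.  The length word is
 * reserved first and patched once the body has been written, since
 * the size of the netaddr and filehandle is not known up front.
 */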
static void
ff_layout_encode_layoutstats(struct xdr_stream *xdr, const void *args,
			     const struct nfs4_xdr_opaque_data *opaque)
{
	struct nfs42_layoutstat_devinfo *devinfo = container_of(opaque,
			struct nfs42_layoutstat_devinfo, ld_private);
	__be32 *start;

	/* layoutupdate length */
	start = xdr_reserve_space(xdr, 4);
	ff_layout_encode_ff_layoutupdate(xdr, devinfo, opaque->data);

	*start = cpu_to_be32((xdr->p - start - 1) * 4);
}

static void
ff_layout_free_layoutstats(struct nfs4_xdr_opaque_data *opaque)
{
	struct nfs4_ff_layout_mirror *mirror = opaque->data;

	ff_layout_put_mirror(mirror);
}

static const struct nfs4_xdr_opaque_ops layoutstat_ops = {
	.encode = ff_layout_encode_layoutstats,
	.free	= ff_layout_free_layoutstats,
};

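/*
 * Walk the layout's mirrors and fill in up to dev_limit devinfo
 * entries for those that have fresh statistics.  Each selected mirror
 * gains a reference that is dropped by ff_layout_free_layoutstats().
 */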
static int
ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
			       struct nfs42_layoutstat_devinfo *devinfo,
			       int dev_limit)
{
	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_deviceid_node *dev;
	int i = 0;

	list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
		if (i >= dev_limit)
			break;
		if (IS_ERR_OR_NULL(mirror->mirror_ds))
			continue;
		if (!test_and_clear_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags))
			continue;
		/* mirror refcount put in ff_layout_free_layoutstats */
		if (!refcount_inc_not_zero(&mirror->ref))
			continue;
		dev = &mirror->mirror_ds->id_node;
		memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);
		devinfo->offset = 0;
		devinfo->length = NFS4_MAX_UINT64;
		spin_lock(&mirror->lock);
		devinfo->read_count = mirror->read_stat.io_stat.ops_completed;
		devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed;
		devinfo->write_count = mirror->write_stat.io_stat.ops_completed;
		devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed;
		spin_unlock(&mirror->lock);
		devinfo->layout_type = LAYOUT_FLEX_FILES;
		devinfo->ld_private.ops = &layoutstat_ops;
		devinfo->ld_private.data = mirror;

		devinfo++;
		i++;
	}
	return i;
}

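/*
 * Allocate and populate the devinfo array for a LAYOUTSTATS call.
 * Returns -ENOENT when no mirror currently has statistics to report.
 */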
static int
ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
{
	struct nfs4_flexfile_layout *ff_layout;
	const int dev_count = PNFS_LAYOUTSTATS_MAXDEV;

	/* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
	args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo), GFP_NOIO);
	if (!args->devinfo)
		return -ENOMEM;

	spin_lock(&args->inode->i_lock);
	ff_layout = FF_LAYOUT_FROM_HDR(NFS_I(args->inode)->layout);
	args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
			&args->devinfo[0], dev_count);
	spin_unlock(&args->inode->i_lock);
	if (!args->num_dev) {
		kfree(args->devinfo);
		args->devinfo = NULL;
		return -ENOENT;
	}

	return 0;
}

static int
ff_layout_set_layoutdriver(struct nfs_server *server,
		const struct nfs_fh *dummy)
{
#if IS_ENABLED(CONFIG_NFS_V4_2)
	server->caps |= NFS_CAP_LAYOUTSTATS;
#endif
	return 0;
}

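/*
 * Commit handling is largely delegated to the generic pNFS commit
 * code; only the DS-info setup/teardown and the final COMMIT dispatch
 * are flexfiles-specific.
 */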
static const struct pnfs_commit_ops ff_layout_commit_ops = {
	.setup_ds_info		= ff_layout_setup_ds_info,
	.release_ds_info	= ff_layout_release_ds_info,
	.mark_request_commit	= pnfs_layout_mark_request_commit,
	.clear_request_commit	= pnfs_generic_clear_request_commit,
	.scan_commit_lists	= pnfs_generic_scan_commit_lists,
	.recover_commit_reqs	= pnfs_generic_recover_commit_reqs,
	.commit_pagelist	= ff_layout_commit_pagelist,
};

static struct pnfs_layoutdriver_type flexfilelayout_type = {
	.id			= LAYOUT_FLEX_FILES,
	.name			= "LAYOUT_FLEX_FILES",
	.owner			= THIS_MODULE,
	.flags			= PNFS_LAYOUTGET_ON_OPEN,
	.max_layoutget_response	= 4096, /* 1 page or so... */
	.set_layoutdriver	= ff_layout_set_layoutdriver,
	.alloc_layout_hdr	= ff_layout_alloc_layout_hdr,
	.free_layout_hdr	= ff_layout_free_layout_hdr,
	.alloc_lseg		= ff_layout_alloc_lseg,
	.free_lseg		= ff_layout_free_lseg,
	.add_lseg		= ff_layout_add_lseg,
	.pg_read_ops		= &ff_layout_pg_read_ops,
	.pg_write_ops		= &ff_layout_pg_write_ops,
	.get_ds_info		= ff_layout_get_ds_info,
	.free_deviceid_node	= ff_layout_free_deviceid_node,
	.read_pagelist		= ff_layout_read_pagelist,
	.write_pagelist		= ff_layout_write_pagelist,
	.alloc_deviceid_node    = ff_layout_alloc_deviceid_node,
	.prepare_layoutreturn   = ff_layout_prepare_layoutreturn,
	.sync			= pnfs_nfs_generic_sync,
	.prepare_layoutstats	= ff_layout_prepare_layoutstats,
};

static int __init nfs4flexfilelayout_init(void)
{
	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
	       __func__);
	return pnfs_register_layoutdriver(&flexfilelayout_type);
}

static void __exit nfs4flexfilelayout_exit(void)
{
	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
	       __func__);
	pnfs_unregister_layoutdriver(&flexfilelayout_type);
}

MODULE_ALIAS("nfs-layouttype4-4");

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");

module_init(nfs4flexfilelayout_init);
module_exit(nfs4flexfilelayout_exit);

module_param(io_maxretrans, ushort, 0644);
MODULE_PARM_DESC(io_maxretrans, "The number of times the NFSv4.1 client "
			"retries an I/O request before returning an error.");