Lines matching refs: req (fs/nfs/pagelist.c)
37 const struct nfs_page *req;
42 const struct nfs_page *req)
44 i->req = req;
50 const struct nfs_page *req = i->req;
53 i->count = (tmp < req->wb_bytes) ? tmp : req->wb_bytes;
58 const struct nfs_page *req = i->req;
61 if (i->count != req->wb_bytes) {
62 size_t base = i->count + req->wb_pgbase;
65 page = nfs_page_to_page(req, base);
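The nfs_page_iter_page fragments above walk the byte range described by wb_pgbase and wb_bytes one page-sized chunk at a time, stopping once i->count reaches wb_bytes. The following standalone sketch models only that arithmetic; the model_* names are invented for illustration and are not kernel identifiers.

/*
 * Standalone model of the page-iterator arithmetic above; not kernel code.
 */
#include <stdio.h>
#include <stddef.h>

#define MODEL_PAGE_SIZE 4096u

struct model_req {		/* stands in for the relevant nfs_page fields */
	size_t wb_pgbase;	/* offset of the data within its first page */
	size_t wb_bytes;	/* total length of the request */
};

struct model_iter {		/* stands in for struct nfs_page_iter_page */
	const struct model_req *req;
	size_t count;		/* bytes consumed so far, capped at wb_bytes */
};

static void model_iter_init(struct model_iter *i, const struct model_req *req)
{
	i->req = req;
	i->count = 0;
}

/* Yields the index of the page holding the next chunk, or -1 when done. */
static long model_iter_next(struct model_iter *i, size_t *chunk)
{
	const struct model_req *req = i->req;
	size_t base, len;

	if (i->count == req->wb_bytes)
		return -1;
	base = i->count + req->wb_pgbase;
	len = MODEL_PAGE_SIZE - (base % MODEL_PAGE_SIZE);
	if (len > req->wb_bytes - i->count)
		len = req->wb_bytes - i->count;
	i->count += len;
	*chunk = len;
	return (long)(base / MODEL_PAGE_SIZE);
}

int main(void)
{
	struct model_req req = { .wb_pgbase = 1024, .wb_bytes = 10000 };
	struct model_iter it;
	size_t chunk;
	long page;

	model_iter_init(&it, &req);
	while ((page = model_iter_next(&it, &chunk)) >= 0)
		printf("page %ld: %zu bytes\n", page, chunk);
	return 0;
}

For the sample request (pgbase 1024, 10000 bytes) the model yields chunks of 3072, 4096 and 2832 bytes across three pages.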
102 hdr->req = nfs_list_entry(mirror->pg_list.next);
104 hdr->cred = nfs_req_openctx(hdr->req)->cred;
105 hdr->io_start = req_offset(hdr->req);
192 * @req: any member of the page group
195 nfs_page_group_lock_head(struct nfs_page *req)
197 struct nfs_page *head = req->wb_head;
204 if (head != req)
210 * nfs_unroll_locks - unlock all newly locked reqs and wait on @req
212 * @req: request that couldn't lock and needs to wait on the req bit lock
218 nfs_unroll_locks(struct nfs_page *head, struct nfs_page *req)
223 for (tmp = head->wb_this_page ; tmp != req; tmp = tmp->wb_this_page) {
288 * @req: request that is to be locked
290 * this lock must be held when modifying req->wb_head
295 nfs_page_set_headlock(struct nfs_page *req)
297 if (!test_and_set_bit(PG_HEADLOCK, &req->wb_flags))
300 set_bit(PG_CONTENDED1, &req->wb_flags);
302 return wait_on_bit_lock(&req->wb_flags, PG_HEADLOCK,
308 * @req: request that is to be locked
311 nfs_page_clear_headlock(struct nfs_page *req)
313 clear_bit_unlock(PG_HEADLOCK, &req->wb_flags);
315 if (!test_bit(PG_CONTENDED1, &req->wb_flags))
317 wake_up_bit(&req->wb_flags, PG_HEADLOCK);
322 * @req: request in group that is to be locked
330 nfs_page_group_lock(struct nfs_page *req)
334 ret = nfs_page_set_headlock(req);
335 if (ret || req->wb_head == req)
337 return nfs_page_set_headlock(req->wb_head);
342 * @req: request in group that is to be unlocked
345 nfs_page_group_unlock(struct nfs_page *req)
347 if (req != req->wb_head)
348 nfs_page_clear_headlock(req->wb_head);
349 nfs_page_clear_headlock(req);
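Two layers are visible above. nfs_page_set_headlock()/nfs_page_clear_headlock() implement a per-request bit lock: the fast path is a test_and_set_bit(), contention is recorded in PG_CONTENDED1 before sleeping in wait_on_bit_lock(), and the release only needs wake_up_bit() when that contention bit was seen. nfs_page_group_lock() then takes this lock on the member first and, if the member is not the head, on the head as well; nfs_page_group_unlock() releases in the reverse order. The same shape recurs further down with PG_BUSY and PG_CONTENDED2 in nfs_unlock_request() and nfs_wait_on_request(). Below is a simplified userspace sketch of the pattern using C11 atomics; it spins where the kernel sleeps on the bit, and the model_* names are invented for illustration.

#include <stdatomic.h>
#include <stdbool.h>

#define PG_HEADLOCK	0
#define PG_CONTENDED1	1

struct model_page {
	_Atomic unsigned long wb_flags;
	struct model_page *wb_head;	/* head of the page group */
};

static bool model_test_and_set_bit(int bit, _Atomic unsigned long *flags)
{
	unsigned long mask = 1UL << bit;

	return atomic_fetch_or(flags, mask) & mask;
}

static void model_set_headlock(struct model_page *req)
{
	while (model_test_and_set_bit(PG_HEADLOCK, &req->wb_flags)) {
		/* Record contention so the owner knows a wake-up is needed;
		 * the kernel sleeps in wait_on_bit_lock() instead of spinning. */
		atomic_fetch_or(&req->wb_flags, 1UL << PG_CONTENDED1);
	}
}

static void model_clear_headlock(struct model_page *req)
{
	atomic_fetch_and(&req->wb_flags, ~(1UL << PG_HEADLOCK));
	/* The kernel follows this with wake_up_bit(), but only when
	 * PG_CONTENDED1 shows that another task actually waited. */
}

/* Member is locked before its head; the head is unlocked before the member. */
static void model_group_lock(struct model_page *req)
{
	model_set_headlock(req);
	if (req->wb_head != req)
		model_set_headlock(req->wb_head);
}

static void model_group_unlock(struct model_page *req)
{
	if (req != req->wb_head)
		model_clear_headlock(req->wb_head);
	model_clear_headlock(req);
}

int main(void)
{
	struct model_page head = { .wb_flags = 0 };
	struct model_page sub = { .wb_flags = 0 };

	head.wb_head = &head;
	sub.wb_head = &head;

	model_group_lock(&sub);		/* locks the member, then the shared head */
	model_group_unlock(&sub);
	return 0;
}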
358 nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
360 struct nfs_page *head = req->wb_head;
364 WARN_ON_ONCE(test_and_set_bit(bit, &req->wb_flags));
366 tmp = req->wb_this_page;
367 while (tmp != req) {
374 tmp = req;
378 } while (tmp != req);
386 * @req - request in page group
389 bool nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit)
393 nfs_page_group_lock(req);
394 ret = nfs_page_group_sync_on_bit_locked(req, bit);
395 nfs_page_group_unlock(req);
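nfs_page_group_sync_on_bit() is a rendezvous built on the group lock: it sets @bit on the calling request and walks the circular wb_this_page ring, so only the last member of the group to arrive sees the bit set on every sibling and gets true back. nfs_page_group_destroy() below relies on this with PG_TEARDOWN so that only the final reference tears the group down. The sketch models just that scan, not the fragment's final pass back over the group; flags are a plain bitmask because the group lock is assumed to be held, and the model names are invented here.

#include <stdbool.h>

struct model_page {
	unsigned long wb_flags;		 /* plain bitmask: group lock assumed held */
	struct model_page *wb_this_page; /* next member of the circular group */
};

/*
 * Set @bit on @req and report whether every member of the group now has it.
 * Only the last caller in the group sees true, which makes this a cheap
 * "everyone has reached this point" barrier for the page group.
 */
static bool model_group_sync_on_bit(struct model_page *req, int bit)
{
	unsigned long mask = 1UL << bit;
	struct model_page *tmp;

	req->wb_flags |= mask;
	for (tmp = req->wb_this_page; tmp != req; tmp = tmp->wb_this_page)
		if (!(tmp->wb_flags & mask))
			return false;	/* a sibling has not arrived yet */
	return true;
}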
401 * nfs_page_group_init - Initialize the page group linkage for @req
402 * @req - a new nfs request
403 * @prev - the previous request in page group, or NULL if @req is the first
407 nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev)
410 WARN_ON_ONCE(prev == req);
414 req->wb_head = req;
415 req->wb_this_page = req;
420 req->wb_head = prev->wb_head;
421 req->wb_this_page = prev->wb_this_page;
422 prev->wb_this_page = req;
426 kref_get(&req->wb_head->wb_kref);
432 inode = nfs_page_to_inode(req);
433 set_bit(PG_INODE_REF, &req->wb_flags);
434 kref_get(&req->wb_kref);
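nfs_page_group_init() establishes the group linkage invariants: a request with no predecessor is its own head and its own wb_this_page successor, while a later request inherits prev->wb_head, is spliced into the circular singly linked ring right after prev, and takes a reference on the head so the head outlives every subrequest (the PG_INODE_REF handling for the write path is left out here). A standalone sketch of that splice, with reference counting reduced to a plain counter and model names that are not kernel identifiers:

#include <assert.h>
#include <stddef.h>

struct model_page {
	struct model_page *wb_head;	 /* first request in the page group */
	struct model_page *wb_this_page; /* next request, circular list */
	unsigned int wb_kref;		 /* stand-in for the kref on the head */
};

static void model_group_init(struct model_page *req, struct model_page *prev)
{
	assert(prev != req);

	if (!prev) {
		/* Head request: a group of one points at itself. */
		req->wb_head = req;
		req->wb_this_page = req;
	} else {
		/* Splice @req into the ring right after @prev, sharing the head. */
		req->wb_head = prev->wb_head;
		req->wb_this_page = prev->wb_this_page;
		prev->wb_this_page = req;
		/* Every subrequest pins the head until the group is torn down. */
		req->wb_head->wb_kref++;
	}
}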
442 * @req - request that no longer needs the page group
450 struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
451 struct nfs_page *head = req->wb_head;
454 if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN))
457 tmp = req;
465 } while (tmp != req);
468 if (head != req)
476 struct nfs_page *req;
482 req = nfs_page_alloc();
483 if (req == NULL)
486 req->wb_lock_context = l_ctx;
493 req->wb_pgbase = pgbase;
494 req->wb_index = index;
495 req->wb_offset = offset;
496 req->wb_bytes = count;
497 kref_init(&req->wb_kref);
498 req->wb_nio = 0;
499 return req;
502 static void nfs_page_assign_folio(struct nfs_page *req, struct folio *folio)
505 req->wb_folio = folio;
507 set_bit(PG_FOLIO, &req->wb_flags);
511 static void nfs_page_assign_page(struct nfs_page *req, struct page *page)
514 req->wb_page = page;
582 nfs_create_subreq(struct nfs_page *req,
589 struct folio *folio = nfs_page_to_folio(req);
590 struct page *page = nfs_page_to_page(req, pgbase);
592 ret = nfs_page_create(req->wb_lock_context, pgbase, req->wb_index,
600 for (last = req->wb_head;
601 last->wb_this_page != req->wb_head;
607 ret->wb_nio = req->wb_nio;
614 * @req: pointer to request
616 void nfs_unlock_request(struct nfs_page *req)
618 clear_bit_unlock(PG_BUSY, &req->wb_flags);
620 if (!test_bit(PG_CONTENDED2, &req->wb_flags))
622 wake_up_bit(&req->wb_flags, PG_BUSY);
627 * @req: pointer to request
629 void nfs_unlock_and_release_request(struct nfs_page *req)
631 nfs_unlock_request(req);
632 nfs_release_request(req);
637 * @req:
642 static void nfs_clear_request(struct nfs_page *req)
644 struct folio *folio = nfs_page_to_folio(req);
645 struct page *page = req->wb_page;
646 struct nfs_lock_context *l_ctx = req->wb_lock_context;
651 req->wb_folio = NULL;
652 clear_bit(PG_FOLIO, &req->wb_flags);
655 req->wb_page = NULL;
665 req->wb_lock_context = NULL;
671 * @req: request to release
675 void nfs_free_request(struct nfs_page *req)
677 WARN_ON_ONCE(req->wb_this_page != req);
680 WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));
681 WARN_ON_ONCE(test_bit(PG_UNLOCKPAGE, &req->wb_flags));
682 WARN_ON_ONCE(test_bit(PG_UPTODATE, &req->wb_flags));
683 WARN_ON_ONCE(test_bit(PG_WB_END, &req->wb_flags));
684 WARN_ON_ONCE(test_bit(PG_REMOVE, &req->wb_flags));
687 nfs_clear_request(req);
688 nfs_page_free(req);
691 void nfs_release_request(struct nfs_page *req)
693 kref_put(&req->wb_kref, nfs_page_group_destroy);
699 * @req: request to wait upon.
705 nfs_wait_on_request(struct nfs_page *req)
707 if (!test_bit(PG_BUSY, &req->wb_flags))
709 set_bit(PG_CONTENDED2, &req->wb_flags);
711 return wait_on_bit_io(&req->wb_flags, PG_BUSY,
720 * @req: this request
722 * Returns zero if @req cannot be coalesced into @desc, otherwise it returns
726 struct nfs_page *prev, struct nfs_page *req)
741 if (((mirror->pg_count + req->wb_bytes) >> PAGE_SHIFT) *
745 return min(mirror->pg_bsize - mirror->pg_count, (size_t)req->wb_bytes);
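The size test above decides how many of the request's bytes may join the current batch: the condition opened on line 741 rejects the request outright when the batch would grow past what a single page of page pointers can describe (in the mainline source the truncated line multiplies the page count by sizeof(struct page *) and compares against PAGE_SIZE), and otherwise the result is min(space left in the mirror, wb_bytes), so a zero return means flush first and retry. A standalone sketch of the arithmetic, assuming 4 KiB pages and 8-byte pointers purely for illustration; the model names are not kernel identifiers.

#include <stdio.h>
#include <stddef.h>

#define MODEL_PAGE_SIZE  4096u
#define MODEL_PAGE_SHIFT 12

/* Subset of the descriptor state the test needs. */
struct model_mirror {
	size_t pg_count;	/* bytes already queued in this batch */
	size_t pg_bsize;	/* I/O size negotiated with the server (rsize/wsize) */
};

/* Returns how many of @wb_bytes can join the batch; 0 means "flush first". */
static size_t model_pg_test(const struct model_mirror *m, size_t wb_bytes)
{
	/* Keep the page-pointer array for the batch within a single page. */
	if (((m->pg_count + wb_bytes) >> MODEL_PAGE_SHIFT) *
	    sizeof(void *) > MODEL_PAGE_SIZE)
		return 0;
	return (m->pg_bsize - m->pg_count) < wb_bytes ?
	       (m->pg_bsize - m->pg_count) : wb_bytes;
}

int main(void)
{
	struct model_mirror m = { .pg_count = 60 * 4096, .pg_bsize = 256 * 1024 };

	/* 240 KiB already queued against a 256 KiB batch: 16 KiB of space left. */
	printf("%zu\n", model_pg_test(&m, 4096));	/* 4096: fits entirely */
	printf("%zu\n", model_pg_test(&m, 32768));	/* 16384: partial fit */
	return 0;
}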
800 struct nfs_page *req = hdr->req;
806 hdr->args.offset = req_offset(req);
812 hdr->args.context = get_nfs_open_context(nfs_req_openctx(req));
813 hdr->args.lock_context = req->wb_lock_context;
873 "(req %s/%llu, %u bytes @ offset %llu)\n",
989 struct nfs_page *req;
1022 req = nfs_list_entry(head->next);
1023 nfs_list_move_request(req, &hdr->pages);
1025 if (req->wb_pgbase == 0)
1028 nfs_page_iter_page_init(&i, req);
1109 struct nfs_page *req)
1114 mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
1148 const struct nfs_page *req)
1152 if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
1154 if (req->wb_pgbase == 0)
1156 if (req->wb_pgbase == prev_end) {
1157 struct folio *folio = nfs_page_to_folio(req);
1160 return req->wb_page == prev->wb_page;
1168 * @req: pointer to nfs_page
1171 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
1178 struct nfs_page *req,
1184 if (!nfs_match_open_context(nfs_req_openctx(req), nfs_req_openctx(prev)))
1186 flctx = locks_inode_context(d_inode(nfs_req_openctx(req)->dentry));
1190 !nfs_match_lock_context(req->wb_lock_context,
1193 if (!nfs_page_is_contiguous(prev, req))
1196 return pgio->pg_ops->pg_test(pgio, prev, req);
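nfs_can_coalesce_requests() combines three gates: the open contexts (and, when the inode has a lock context, the lock contexts) of prev and req must match, the requests must pass nfs_page_is_contiguous(), and the descriptor's pg_test op must agree. The contiguity check itself requires the byte ranges to be adjacent both in the file and in memory: a request starting at pgbase 0 is accepted only if the previous one ran to the end of its own page or folio (this is how mainline completes the branch after line 1154), while a non-zero pgbase must continue at exactly the previous end on the very same page or folio. A standalone sketch of the contiguity part, with the folio/page distinction collapsed into one backing-page identity and model names invented here:

#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

struct model_req {
	uint64_t offset;	/* absolute file offset, i.e. req_offset() */
	size_t wb_pgbase;	/* start of the data within its page */
	size_t wb_bytes;	/* length of the request */
	const void *page;	/* identity of the backing page/folio */
	size_t page_len;	/* usable length of that page/folio */
};

static bool model_is_contiguous(const struct model_req *prev,
				const struct model_req *req)
{
	size_t prev_end = prev->wb_pgbase + prev->wb_bytes;

	/* The byte ranges must be adjacent in the file... */
	if (req->offset != prev->offset + prev->wb_bytes)
		return false;
	/* ...and adjacent in memory: a fresh page is only fine if the
	 * previous request ran all the way to the end of its page... */
	if (req->wb_pgbase == 0)
		return prev_end == prev->page_len;
	/* ...otherwise the new data must continue on the very same page. */
	if (req->wb_pgbase == prev_end)
		return req->page == prev->page;
	return false;
}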
1202 * @req: request
1204 * If the request 'req' was successfully coalesced into the existing list
1205 * of pages 'desc', it returns the size of req.
1209 struct nfs_page *req)
1217 desc->pg_ops->pg_init(desc, req);
1220 mirror->pg_base = req->wb_pgbase;
1226 if (desc->pg_maxretrans && req->wb_nio > desc->pg_maxretrans) {
1234 size = nfs_coalesce_size(prev, req, desc);
1235 if (size < req->wb_bytes)
1237 nfs_list_move_request(req, &mirror->pg_list);
1238 mirror->pg_count += req->wb_bytes;
1239 return req->wb_bytes;
1260 struct nfs_page *req)
1264 nfs_list_move_request(req, &head);
1271 * @req: request
1274 * same page group. If so, it will submit @req as the last one, to ensure
1275 * the pointer to @req is still valid in case of failure.
1277 * Returns true if the request 'req' was successfully coalesced into the
1281 struct nfs_page *req)
1287 nfs_page_group_lock(req);
1289 subreq = req;
1295 if (subreq == req)
1297 req->wb_pgbase += size;
1298 req->wb_bytes -= size;
1299 req->wb_offset += size;
1300 subreq_size = req->wb_bytes;
1301 subreq = req;
1304 if (WARN_ON_ONCE(subreq != req)) {
1305 nfs_page_group_unlock(req);
1307 subreq = req;
1308 subreq_size = req->wb_bytes;
1309 nfs_page_group_lock(req);
1313 nfs_page_group_unlock(req);
1319 nfs_page_group_lock(req);
1322 subreq = nfs_create_subreq(req, req->wb_pgbase,
1323 req->wb_offset, size);
1329 nfs_page_group_unlock(req);
1333 nfs_page_group_unlock(req);
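The add path above works in two layers. nfs_pageio_do_add_request() initialises the descriptor for the first request, enforces pg_maxretrans, and returns how many bytes it accepted: the full wb_bytes once the request has been moved onto the mirror's list, or the smaller size nfs_coalesce_size() allows (possibly zero). __nfs_pageio_add_request() loops under the group lock, trimming wb_pgbase, wb_offset and wb_bytes by what was accepted, turning an accepted prefix into a subrequest via nfs_create_subreq(), and, when nothing fits, dropping the group lock to flush the batch before retrying (the unlock/relock pair at lines 1313 and 1319). The model below compresses that control flow into one loop: no locking, no subrequests, no error handling, and all names invented for illustration.

#include <stdio.h>
#include <stddef.h>

struct model_desc {
	size_t pg_count;	/* bytes queued so far */
	size_t pg_bsize;	/* batch limit (rsize/wsize) */
};

struct model_req {
	size_t wb_offset;	/* page offset, advanced in step with wb_pgbase */
	size_t wb_pgbase;	/* offset of the data within the page */
	size_t wb_bytes;	/* remaining length */
};

/* Flush the queued batch, e.g. by issuing the read/write RPC. */
static void model_doio(struct model_desc *d)
{
	printf("I/O: %zu bytes\n", d->pg_count);
	d->pg_count = 0;
}

/* How much of @req fits into the batch right now: all, a prefix, or nothing. */
static size_t model_do_add(struct model_desc *d, const struct model_req *req)
{
	size_t space = d->pg_bsize - d->pg_count;
	size_t size = space < req->wb_bytes ? space : req->wb_bytes;

	d->pg_count += size;
	return size;
}

/* Keep feeding @req into batches until every byte has been queued. */
static void model_add_request(struct model_desc *d, struct model_req *req)
{
	while (req->wb_bytes) {
		size_t size = model_do_add(d, req);

		if (!size) {		/* batch is full: flush and retry */
			model_doio(d);
			continue;
		}
		/* In the kernel an accepted prefix becomes a subrequest via
		 * nfs_create_subreq(); here we simply advance the request. */
		req->wb_offset += size;
		req->wb_pgbase += size;
		req->wb_bytes -= size;
	}
}

int main(void)
{
	struct model_desc d = { .pg_count = 0, .pg_bsize = 64 * 1024 };
	struct model_req req = { .wb_offset = 0, .wb_pgbase = 0,
				 .wb_bytes = 150 * 1024 };

	model_add_request(&d, &req);
	if (d.pg_count)
		model_doio(&d);		/* flush whatever is still queued */
	return 0;
}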
1347 struct nfs_page *req;
1349 req = list_first_entry(&head, struct nfs_page, wb_list);
1350 if (__nfs_pageio_add_request(desc, req))
1364 struct nfs_page *req)
1369 ret = __nfs_pageio_add_request(desc, req);
1396 struct nfs_page *req)
1402 pgbase = req->wb_pgbase;
1403 offset = req->wb_offset;
1404 bytes = req->wb_bytes;
1406 nfs_pageio_setup_mirroring(desc, req);
1412 nfs_page_group_lock(req);
1414 dupreq = nfs_create_subreq(req,
1417 nfs_page_group_unlock(req);
1429 if (!nfs_pageio_add_request_mirror(desc, req))
1486 struct nfs_page *req = nfs_list_entry(pages.next);
1488 if (!nfs_pageio_add_request(desc, req))