Lines Matching refs:req
64 hdr->req = nfs_list_entry(mirror->pg_list.next);
66 hdr->cred = nfs_req_openctx(hdr->req)->cred;
67 hdr->io_start = req_offset(hdr->req);
153 * @req: any member of the page group
156 nfs_page_group_lock_head(struct nfs_page *req)
158 struct nfs_page *head = req->wb_head;
165 if (head != req)
171 * nfs_unroll_locks - unlock all newly locked reqs and wait on @req
173 * @req: request that couldn't lock and needs to wait on the req bit lock
179 nfs_unroll_locks(struct nfs_page *head, struct nfs_page *req)
184 for (tmp = head->wb_this_page ; tmp != req; tmp = tmp->wb_this_page) {
249 * @req: request that is to be locked
251 * this lock must be held when modifying req->wb_head
256 nfs_page_set_headlock(struct nfs_page *req)
258 if (!test_and_set_bit(PG_HEADLOCK, &req->wb_flags))
261 set_bit(PG_CONTENDED1, &req->wb_flags);
263 return wait_on_bit_lock(&req->wb_flags, PG_HEADLOCK,
269 * @req: request that is to be locked
272 nfs_page_clear_headlock(struct nfs_page *req)
275 clear_bit(PG_HEADLOCK, &req->wb_flags);
277 if (!test_bit(PG_CONTENDED1, &req->wb_flags))
279 wake_up_bit(&req->wb_flags, PG_HEADLOCK);
284 * @req: request in group that is to be locked
292 nfs_page_group_lock(struct nfs_page *req)
296 ret = nfs_page_set_headlock(req);
297 if (ret || req->wb_head == req)
299 return nfs_page_set_headlock(req->wb_head);
304 * @req: request in group that is to be unlocked
307 nfs_page_group_unlock(struct nfs_page *req)
309 if (req != req->wb_head)
310 nfs_page_clear_headlock(req->wb_head);
311 nfs_page_clear_headlock(req);
320 nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
322 struct nfs_page *head = req->wb_head;
326 WARN_ON_ONCE(test_and_set_bit(bit, &req->wb_flags));
328 tmp = req->wb_this_page;
329 while (tmp != req) {
336 tmp = req;
340 } while (tmp != req);
348 * @req: request in page group
351 bool nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit)
355 nfs_page_group_lock(req);
356 ret = nfs_page_group_sync_on_bit_locked(req, bit);
357 nfs_page_group_unlock(req);
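nfs_page_group_sync_on_bit() answers "am I the last member of this page group to reach this point?": it sets @bit on one request, walks the wb_this_page ring, and only when every member carries the bit does it reset the bit group-wide (the do/while at lines 336-340) and return true. A standalone model of those semantics; the toy_req struct, its field names, and group_sync_on_bit are simplified stand-ins, not the kernel's types.

#include <stdbool.h>
#include <stdio.h>

struct toy_req {
	unsigned long flags;
	struct toy_req *this_page;	/* next group member, circular */
};

static bool group_sync_on_bit(struct toy_req *req, unsigned bit)
{
	struct toy_req *tmp;

	req->flags |= 1UL << bit;		/* mark this member */
	for (tmp = req->this_page; tmp != req; tmp = tmp->this_page)
		if (!(tmp->flags & (1UL << bit)))
			return false;		/* someone still pending */

	/* all members marked: reset the bit so the group can sync again */
	tmp = req;
	do {
		tmp->flags &= ~(1UL << bit);
		tmp = tmp->this_page;
	} while (tmp != req);
	return true;				/* last one in acts for the group */
}

int main(void)
{
	struct toy_req a = { 0, NULL }, b = { 0, NULL };
	a.this_page = &b;
	b.this_page = &a;
	printf("%d\n", group_sync_on_bit(&a, 0));	/* 0: b not there yet */
	printf("%d\n", group_sync_on_bit(&b, 0));	/* 1: whole group synced */
	return 0;
}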
363 * nfs_page_group_init - Initialize the page group linkage for @req
364 * @req: a new nfs request
365 * @prev: the previous request in page group, or NULL if @req is the first
369 nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev)
372 WARN_ON_ONCE(prev == req);
376 req->wb_head = req;
377 req->wb_this_page = req;
382 req->wb_head = prev->wb_head;
383 req->wb_this_page = prev->wb_this_page;
384 prev->wb_this_page = req;
388 kref_get(&req->wb_head->wb_kref);
394 inode = page_file_mapping(req->wb_page)->host;
395 set_bit(PG_INODE_REF, &req->wb_flags);
396 kref_get(&req->wb_kref);
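The linkage set up at lines 376-385 is a circular singly linked list: the first request becomes a one-element ring pointing at itself, and each later request splices in immediately after @prev, inheriting prev's head. A pointer-only sketch of that surgery; toy_req, group_init, and the shortened field names are stand-ins, and the refcounting and inode bookkeeping of lines 388-396 are deliberately omitted.

#include <stdio.h>

struct toy_req {
	struct toy_req *head;		/* wb_head: first request of the group */
	struct toy_req *this_page;	/* wb_this_page: circular next pointer */
};

static void group_init(struct toy_req *req, struct toy_req *prev)
{
	if (!prev) {			/* first request: trivial ring */
		req->head = req;
		req->this_page = req;
	} else {			/* splice in right after prev */
		req->head = prev->head;
		req->this_page = prev->this_page;
		prev->this_page = req;
		/* the kernel also does kref_get(&req->wb_head->wb_kref) here
		 * (line 388) so the head outlives every member */
	}
}

int main(void)
{
	struct toy_req a, b, c;
	int n = 1;

	group_init(&a, NULL);
	group_init(&b, &a);
	group_init(&c, &b);		/* ring: a -> b -> c -> a */
	for (struct toy_req *t = a.this_page; t != &a; t = t->this_page)
		n++;
	printf("group of %d, head %s\n", n,
	       (a.head == b.head && b.head == c.head) ? "shared" : "bug");
	return 0;
}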
404 * @req: request that no longer needs the page group
412 struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
413 struct nfs_page *head = req->wb_head;
416 if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN))
419 tmp = req;
427 } while (tmp != req);
430 if (head != req)
439 struct nfs_page *req;
445 req = nfs_page_alloc();
446 if (req == NULL)
449 req->wb_lock_context = l_ctx;
456 req->wb_page = page;
458 req->wb_index = page_index(page);
461 req->wb_offset = offset;
462 req->wb_pgbase = pgbase;
463 req->wb_bytes = count;
464 kref_init(&req->wb_kref);
465 req->wb_nio = 0;
466 return req;
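For orientation, the geometry fields set at lines 458-463 relate as follows: wb_index is the page's index within the file, wb_offset the byte offset within that page, wb_pgbase where the data begins inside wb_page, and wb_bytes the request length; the kernel's req_offset() rebuilds the absolute file offset from the first two. A worked toy example, assuming a 4 KiB page (PAGE_SHIFT of 12) and invented struct names:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

struct toy_req {
	unsigned long	index;	/* wb_index:  offset >> PAGE_SHIFT */
	unsigned int	offset;	/* wb_offset: offset within the page */
	unsigned int	pgbase;	/* wb_pgbase: start of data in wb_page */
	unsigned int	bytes;	/* wb_bytes:  length of the request */
};

static unsigned long long req_offset(const struct toy_req *r)
{
	return ((unsigned long long)r->index << PAGE_SHIFT) + r->offset;
}

int main(void)
{
	/* a 512-byte request starting 1 KiB into page 3 of the file */
	struct toy_req r = { .index = 3, .offset = 1024, .pgbase = 1024,
			     .bytes = 512 };
	printf("absolute offset = %llu\n", req_offset(&r));	/* 13312 */
	return 0;
}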
497 nfs_create_subreq(struct nfs_page *req,
505 ret = __nfs_create_request(req->wb_lock_context, req->wb_page,
509 for (last = req->wb_head;
510 last->wb_this_page != req->wb_head;
515 ret->wb_index = req->wb_index;
517 ret->wb_nio = req->wb_nio;
524 * @req: pointer to request
526 void nfs_unlock_request(struct nfs_page *req)
528 if (!NFS_WBACK_BUSY(req)) {
533 clear_bit(PG_BUSY, &req->wb_flags);
535 if (!test_bit(PG_CONTENDED2, &req->wb_flags))
537 wake_up_bit(&req->wb_flags, PG_BUSY);
542 * @req: pointer to request
544 void nfs_unlock_and_release_request(struct nfs_page *req)
546 nfs_unlock_request(req);
547 nfs_release_request(req);
552 * @req: request to clear
557 static void nfs_clear_request(struct nfs_page *req)
559 struct page *page = req->wb_page;
560 struct nfs_lock_context *l_ctx = req->wb_lock_context;
565 req->wb_page = NULL;
575 req->wb_lock_context = NULL;
581 * @req: request to release
585 void nfs_free_request(struct nfs_page *req)
587 WARN_ON_ONCE(req->wb_this_page != req);
590 WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));
591 WARN_ON_ONCE(test_bit(PG_UNLOCKPAGE, &req->wb_flags));
592 WARN_ON_ONCE(test_bit(PG_UPTODATE, &req->wb_flags));
593 WARN_ON_ONCE(test_bit(PG_WB_END, &req->wb_flags));
594 WARN_ON_ONCE(test_bit(PG_REMOVE, &req->wb_flags));
597 nfs_clear_request(req);
598 nfs_page_free(req);
601 void nfs_release_request(struct nfs_page *req)
603 kref_put(&req->wb_kref, nfs_page_group_destroy);
609 * @req: request to wait upon.
615 nfs_wait_on_request(struct nfs_page *req)
617 if (!test_bit(PG_BUSY, &req->wb_flags))
619 set_bit(PG_CONTENDED2, &req->wb_flags);
621 return wait_on_bit_io(&req->wb_flags, PG_BUSY,
630 * @req: this request
632 * Returns zero if @req cannot be coalesced into @desc, otherwise it returns
636 struct nfs_page *prev, struct nfs_page *req)
651 if (((mirror->pg_count + req->wb_bytes) >> PAGE_SHIFT) *
655 return min(mirror->pg_bsize - mirror->pg_count, (size_t)req->wb_bytes);
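The return at line 655 is the heart of the coalescing size policy: a request may contribute at most the space left in the current batch (pg_bsize minus pg_count), and never more than its own wb_bytes; anything beyond that gets split off by the caller. A worked sketch with invented numbers; coalesce_size and its flattened parameters are stand-ins, not the kernel signature.

#include <stdio.h>

static size_t coalesce_size(size_t pg_bsize, size_t pg_count, size_t wb_bytes)
{
	if (pg_count >= pg_bsize)
		return 0;			/* batch already full */
	size_t room = pg_bsize - pg_count;	/* space left in this batch */
	return room < wb_bytes ? room : wb_bytes;
}

int main(void)
{
	/* 64 KiB I/O size, 60 KiB already queued, 8 KiB request arriving:
	 * only 4 KiB coalesces; the caller splits off the remainder. */
	printf("%zu\n", coalesce_size(65536, 61440, 8192));	/* 4096 */
	return 0;
}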
709 struct nfs_page *req = hdr->req;
715 hdr->args.offset = req_offset(req);
718 hdr->args.pgbase = req->wb_pgbase;
721 hdr->args.context = get_nfs_open_context(nfs_req_openctx(req));
722 hdr->args.lock_context = req->wb_lock_context;
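nfs_pgio_rpcsetup() (lines 709-722) derives the wire arguments for the whole batch from its first request: the absolute offset via req_offset(), the page base, and the open and lock contexts, while the byte count covers every coalesced request. A reduced model of that mapping with toy types and invented values; only the shape of the mapping is taken from the lines above.

#include <stdio.h>

struct toy_req  { unsigned long long offset; unsigned pgbase; };
struct toy_args { unsigned long long offset; unsigned pgbase, count; };

static void rpcsetup(struct toy_args *args, const struct toy_req *first,
		     unsigned total_bytes)
{
	args->offset = first->offset;	/* hdr->args.offset = req_offset(req) */
	args->pgbase = first->pgbase;	/* hdr->args.pgbase = req->wb_pgbase  */
	args->count  = total_bytes;	/* bytes across the coalesced list    */
}

int main(void)
{
	/* three contiguous 4 KiB requests starting at byte 8192 */
	struct toy_req first = { .offset = 8192, .pgbase = 0 };
	struct toy_args args;

	rpcsetup(&args, &first, 3 * 4096);
	printf("RPC: %u bytes @ %llu\n", args.count, args.offset);	/* 12288 @ 8192 */
	return 0;
}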
779 "(req %s/%llu, %u bytes @ offset %llu)\n",
897 struct nfs_page *req;
926 req = nfs_list_entry(head->next);
927 nfs_list_move_request(req, &hdr->pages);
929 if (!last_page || last_page != req->wb_page) {
933 *pages++ = last_page = req->wb_page;
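When nfs_generic_pgio() flattens the request list into the page array (lines 926-933), consecutive requests that landed on the same page, such as subrequests produced by splitting, contribute that page only once: the last_page comparison is the whole dedup. A standalone sketch of that loop with toy types; the real code walks an nfs_page list rather than an array.

#include <stdio.h>

struct toy_page { int id; };

static int fill_pages(struct toy_page **reqs, int nreqs, struct toy_page **pages)
{
	struct toy_page *last = NULL;
	int n = 0;

	for (int i = 0; i < nreqs; i++) {
		if (!last || last != reqs[i])	/* new page: emit it */
			pages[n++] = last = reqs[i];
		/* same page as the previous request: already in the array */
	}
	return n;
}

int main(void)
{
	struct toy_page a = {1}, b = {2};
	struct toy_page *reqs[] = { &a, &a, &b };	/* two subreqs share page a */
	struct toy_page *pages[3];

	printf("%d pages\n", fill_pages(reqs, 3, pages));	/* 2 */
	return 0;
}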
1001 struct nfs_page *req)
1006 mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
1042 * @req: pointer to nfs_page
1045 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
1052 struct nfs_page *req,
1058 if (!nfs_match_open_context(nfs_req_openctx(req), nfs_req_openctx(prev)))
1060 flctx = d_inode(nfs_req_openctx(req)->dentry)->i_flctx;
1064 !nfs_match_lock_context(req->wb_lock_context,
1067 if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
1069 if (req->wb_page == prev->wb_page) {
1070 if (req->wb_pgbase != prev->wb_pgbase + prev->wb_bytes)
1073 if (req->wb_pgbase != 0 ||
1078 return pgio->pg_ops->pg_test(pgio, prev, req);
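The checks at lines 1067-1076 encode "no hole between prev and req": the file offsets must abut, and at page granularity either both requests share a page with abutting pgbase ranges, or req starts a fresh page at pgbase 0 while prev ran exactly to its page boundary. A toy predicate capturing just those tests; the struct, names, and PAGE_SIZE value are assumptions, and the open-context, lock, and pg_test checks are left out.

#include <stdbool.h>

#define PAGE_SIZE 4096u

struct toy_req {
	unsigned long long offset;	/* absolute file offset (req_offset) */
	const void *page;		/* identity of wb_page */
	unsigned pgbase, bytes;
};

static bool contiguous(const struct toy_req *prev, const struct toy_req *req)
{
	if (req->offset != prev->offset + prev->bytes)
		return false;			/* hole in the file range */
	if (req->page == prev->page)		/* same page: ranges must abut */
		return req->pgbase == prev->pgbase + prev->bytes;
	/* new page: must start at its base while prev filled to the edge */
	return req->pgbase == 0 && prev->pgbase + prev->bytes == PAGE_SIZE;
}

int main(void)
{
	char pg1, pg2;			/* stand-in page identities */
	struct toy_req prev = { 0,    &pg1, 0, 4096 };
	struct toy_req next = { 4096, &pg2, 0, 512 };

	return !contiguous(&prev, &next);	/* contiguous: exits 0 */
}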
1084 * @req: request
1086 * If the request 'req' was successfully coalesced into the existing list
1087 * of pages 'desc', it returns the size of req.
1091 struct nfs_page *req)
1099 desc->pg_ops->pg_init(desc, req);
1102 mirror->pg_base = req->wb_pgbase;
1108 if (desc->pg_maxretrans && req->wb_nio > desc->pg_maxretrans) {
1116 size = nfs_coalesce_size(prev, req, desc);
1117 if (size < req->wb_bytes)
1119 nfs_list_move_request(req, &mirror->pg_list);
1120 mirror->pg_count += req->wb_bytes;
1121 return req->wb_bytes;
1146 struct nfs_page *req)
1150 nfs_list_move_request(req, &head);
1157 * @req: request
1160 * same page group. If so, it will submit @req as the last one, to ensure
1161 * the pointer to @req is still valid in case of failure.
1163 * Returns true if the request 'req' was successfully coalesced into the
1167 struct nfs_page *req)
1173 nfs_page_group_lock(req);
1175 subreq = req;
1181 if (subreq == req)
1183 req->wb_pgbase += size;
1184 req->wb_bytes -= size;
1185 req->wb_offset += size;
1186 subreq_size = req->wb_bytes;
1187 subreq = req;
1190 if (WARN_ON_ONCE(subreq != req)) {
1191 nfs_page_group_unlock(req);
1193 subreq = req;
1194 subreq_size = req->wb_bytes;
1195 nfs_page_group_lock(req);
1199 nfs_page_group_unlock(req);
1205 nfs_page_group_lock(req);
1208 subreq = nfs_create_subreq(req, req->wb_pgbase,
1209 req->wb_offset, size);
1215 nfs_page_group_unlock(req);
1219 nfs_page_group_unlock(req);
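__nfs_pageio_add_request() loops until the whole request is batched: each pass asks the coalescer how much fits, and when only part of @req is accepted, the accepted front advances wb_pgbase/wb_offset and shrinks wb_bytes while the remainder goes around again as a subrequest (lines 1183-1209). A sequential sketch of that net effect, ignoring the group locking and error unrolling; try_add stands in for nfs_pageio_do_add_request(), and all names here are invented.

#include <stdio.h>

struct toy_req { unsigned long long offset; unsigned pgbase, bytes; };

/* stand-in for nfs_pageio_do_add_request(): accept at most @max bytes */
static unsigned try_add(const struct toy_req *r, unsigned max)
{
	return r->bytes < max ? r->bytes : max;
}

static void add_request(struct toy_req *req, unsigned max_per_batch)
{
	while (req->bytes) {
		unsigned size = try_add(req, max_per_batch);

		printf("batched %u bytes @ %llu\n", size, req->offset);
		req->pgbase += size;	/* consume the accepted front */
		req->offset += size;
		req->bytes  -= size;
		/* kernel: leftover bytes are wrapped by nfs_create_subreq()
		 * and the loop retries; errors must re-lock and roll back */
	}
}

int main(void)
{
	struct toy_req r = { .offset = 0, .pgbase = 0, .bytes = 10000 };

	add_request(&r, 4096);		/* 4096 + 4096 + 1808 */
	return 0;
}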
1235 struct nfs_page *req;
1237 req = list_first_entry(&head, struct nfs_page, wb_list);
1238 if (__nfs_pageio_add_request(desc, req))
1252 struct nfs_page *req)
1257 ret = __nfs_pageio_add_request(desc, req);
1284 struct nfs_page *req)
1290 pgbase = req->wb_pgbase;
1291 offset = req->wb_offset;
1292 bytes = req->wb_bytes;
1294 nfs_pageio_setup_mirroring(desc, req);
1300 nfs_page_group_lock(req);
1302 dupreq = nfs_create_subreq(req,
1305 nfs_page_group_unlock(req);
1317 if (!nfs_pageio_add_request_mirror(desc, req))
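nfs_pageio_add_request() fans one request out to every mirror: mirrors 1..n-1 each get a duplicate created with nfs_create_subreq() under the group lock, and the original is queued to mirror 0 last, echoing the "submit @req last so the pointer stays valid on failure" rule noted at lines 1160-1161. A shape-only sketch of that ordering with toy types; dup_req and add_to_mirror are invented stand-ins for the kernel helpers.

#include <stdbool.h>
#include <stdio.h>

struct toy_req { unsigned long long offset; unsigned bytes; };

static struct toy_req dup_req(const struct toy_req *r) { return *r; }

static bool add_to_mirror(int mirror, const struct toy_req *r)
{
	printf("mirror %d: %u bytes @ %llu\n", mirror, r->bytes, r->offset);
	return true;
}

static bool add_request(struct toy_req *req, int mirror_count)
{
	/* duplicates go to mirrors 1..n-1 first... */
	for (int m = 1; m < mirror_count; m++) {
		struct toy_req dup = dup_req(req);

		if (!add_to_mirror(m, &dup))
			return false;	/* caller's @req still untouched */
	}
	/* ...and the original is handed to mirror 0 only at the end */
	return add_to_mirror(0, req);
}

int main(void)
{
	struct toy_req r = { .offset = 4096, .bytes = 4096 };

	return !add_request(&r, 3);
}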
1373 struct nfs_page *req = nfs_list_entry(pages.next);
1375 if (!nfs_pageio_add_request(desc, req))