
Searched refs: BLKIF_MAX_SEGMENTS_PER_REQUEST (Results 1 - 8 of 8), sorted by relevance

/kernel/linux/linux-5.10/include/xen/interface/io/
  blkif.h
    159  * issue requests with more than BLKIF_MAX_SEGMENTS_PER_REQUEST (11). The
    183  #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11   (macro definition)
    202  struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
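
Taken together, the blkif.h matches (lines 183 and 202 above) amount to the protocol-level definition sketched below. This is a minimal, self-contained sketch rather than a verbatim copy of the header: grant_ref_t is assumed to be a 32-bit handle here, and the other fields of the ring request are elided rather than guessed.

```c
#include <stdint.h>

typedef uint32_t grant_ref_t;   /* assumption for this sketch: 32-bit grant handle */

/* A single read/write request may carry at most 11 data segments. */
#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11

struct blkif_request_segment {
	grant_ref_t gref;       /* grant reference to the I/O buffer frame      */
	uint8_t     first_sect; /* first 512-byte sector of the frame to use    */
	uint8_t     last_sect;  /* last 512-byte sector of the frame to use     */
};

/* The read/write request body ends in a fixed-size segment array (line 202). */
struct blkif_request_rw {
	uint8_t nr_segments;    /* how many entries of seg[] are valid          */
	/* ... request id, sector number and other fields elided ...            */
	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};
```

Line 159's comment is the contract this encodes: a frontend must not issue requests with more than this many segments unless a larger limit has been negotiated separately.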
/kernel/linux/linux-6.6/include/xen/interface/io/
  blkif.h
    159  * issue requests with more than BLKIF_MAX_SEGMENTS_PER_REQUEST (11). The
    183  #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11   (macro definition)
    202  struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
/kernel/linux/linux-5.10/drivers/block/xen-blkback/
  blkback.c
     60  * A sane value is xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST, but can
    245  struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];   (in free_persistent_gnts())
    246  struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];                    (in free_persistent_gnts())
    267  if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||               (in free_persistent_gnts())
    287  struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];   (in xen_blkbk_unmap_purged_grants())
    288  struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];                    (in xen_blkbk_unmap_purged_grants())
    311  if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {               (in xen_blkbk_unmap_purged_grants())
    735  struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];   (in xen_blkbk_unmap())
    736  struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];              (in xen_blkbk_unmap())
    741  unsigned int batch = min(num, BLKIF_MAX_SEGMENTS_PER_REQUEST);         (in xen_blkbk_unmap())
    [all...]
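
Every blkback.c match above follows the same batching idiom: grant-unmap operations are staged into arrays of BLKIF_MAX_SEGMENTS_PER_REQUEST entries and flushed once the batch fills or the input runs out. The userspace sketch below illustrates only that control flow; flush_batch() is a hypothetical stand-in for the kernel's batched grant-unmap call, not a real API.

```c
#include <stddef.h>
#include <stdio.h>

#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11

/* Stand-in for the kernel's batched grant-unmap call. */
static void flush_batch(const int *batch, size_t n)
{
	printf("unmapping batch of %zu grants (first ref %d)\n", n, batch[0]);
}

/* Accumulate up to BLKIF_MAX_SEGMENTS_PER_REQUEST items, then flush --
 * the same trigger shape as lines 267 and 311 above. */
static void unmap_all(const int *grants, size_t count)
{
	int batch[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	size_t segs_to_unmap = 0;

	for (size_t i = 0; i < count; i++) {
		batch[segs_to_unmap] = grants[i];
		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
		    i == count - 1) {
			flush_batch(batch, segs_to_unmap);
			segs_to_unmap = 0;
		}
	}
}

int main(void)
{
	int grants[30];

	for (size_t i = 0; i < 30; i++)
		grants[i] = (int)i;
	unmap_all(grants, 30);   /* expect batches of 11, 11 and 8 */
	return 0;
}
```

Running it with 30 items prints batches of 11, 11 and 8, mirroring the `++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST` checks in free_persistent_gnts() and xen_blkbk_unmap_purged_grants().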
  common.h
     85  struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    138  struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    403  int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;   (in blkif_get_x86_32_req())
    451  int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;   (in blkif_get_x86_64_req())
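
The common.h hits are the 32-bit and 64-bit request-translation helpers, which both start with n = BLKIF_MAX_SEGMENTS_PER_REQUEST and use it as an upper bound when copying guest-supplied segments. A simplified, hypothetical version of that clamping (the structs are stand-ins, not the kernel layout):

```c
#include <stdint.h>
#include <stdio.h>

#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11

struct segment { uint32_t gref; uint8_t first_sect, last_sect; };

struct rw_request {
	uint8_t        nr_segments;
	struct segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};

/* Copy a guest-provided request without trusting nr_segments beyond the
 * protocol maximum -- the same bound the matches at lines 403/451 apply. */
static void get_guest_req(struct rw_request *dst, const struct rw_request *src)
{
	int n = BLKIF_MAX_SEGMENTS_PER_REQUEST;

	dst->nr_segments = src->nr_segments;
	if (src->nr_segments < n)
		n = src->nr_segments;
	for (int i = 0; i < n; i++)
		dst->seg[i] = src->seg[i];
}

int main(void)
{
	struct rw_request src = { .nr_segments = 200 };  /* bogus guest value */
	struct rw_request dst;

	get_guest_req(&dst, &src);
	printf("guest claimed %d segments, copied at most %d\n",
	       src.nr_segments, BLKIF_MAX_SEGMENTS_PER_REQUEST);
	return 0;
}
```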
/kernel/linux/linux-6.6/drivers/block/xen-blkback/
  blkback.c
     60  * A sane value is xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST, but can
    245  struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];   (in free_persistent_gnts())
    246  struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];                    (in free_persistent_gnts())
    270  if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||               (in free_persistent_gnts())
    292  struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];   (in xen_blkbk_unmap_purged_grants())
    293  struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];                    (in xen_blkbk_unmap_purged_grants())
    316  if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {               (in xen_blkbk_unmap_purged_grants())
    735  struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];   (in xen_blkbk_unmap())
    736  struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];              (in xen_blkbk_unmap())
    741  unsigned int batch = min(num, BLKIF_MAX_SEGMENTS_PER_REQUEST);         (in xen_blkbk_unmap())
    [all...]
  common.h
     85  struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    138  struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
/kernel/linux/linux-5.10/drivers/block/
  xen-blkfront.c
     71  * BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE = 44KB
     74  * should be <= ( 2 * BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE) =
     77  #define HAS_EXTRA_REQ (BLKIF_MAX_SEGMENTS_PER_REQUEST < XEN_PFN_PER_PAGE)
    626  grant_idx >= BLKIF_MAX_SEGMENTS_PER_REQUEST)) {                            (in blkif_setup_rw_req_grant())
    631  grant_idx -= BLKIF_MAX_SEGMENTS_PER_REQUEST;                               (in blkif_setup_rw_req_grant())
    704  first->u.rw.nr_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;                  (in blkif_setup_extra_req())
    706  second->u.rw.nr_segments = nr_segments - BLKIF_MAX_SEGMENTS_PER_REQUEST;   (in blkif_setup_extra_req())
    708  (BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE) / 512;                    (in blkif_setup_extra_req())
    739  if (max_grefs > BLKIF_MAX_SEGMENTS_PER_REQUEST)                            (in blkif_queue_rw_req())
    773  num_grant > BLKIF_MAX_SEGMENTS_PER_REQUEST;                                (in blkif_queue_rw_req())
    [all...]
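
The xen-blkfront.c matches revolve around one calculation: a single request covers at most BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE = 11 * 4096 bytes = 44 KB of data (line 71), and when that is not enough to describe one guest page (HAS_EXTRA_REQ, line 77) the segments are split across two ring requests, as lines 704-708 in blkif_setup_extra_req() show. The sketch below mirrors only that split arithmetic; the struct and function names are simplified stand-ins, not the driver's types.

```c
#include <stdint.h>
#include <stdio.h>

#define XEN_PAGE_SIZE                  4096u
#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11

/* Simplified stand-in for the ring request; only the fields the split touches. */
struct rw_req {
	uint8_t  nr_segments;
	uint64_t sector_number;
};

/* Mirrors the arithmetic at lines 704-708: the first request takes the
 * protocol maximum, the second takes the remainder, and the second request
 * starts 11 * XEN_PAGE_SIZE bytes (expressed in 512-byte sectors) further on. */
static void split_request(struct rw_req *first, struct rw_req *second,
			  unsigned int nr_segments)
{
	first->nr_segments    = BLKIF_MAX_SEGMENTS_PER_REQUEST;
	second->nr_segments   = nr_segments - BLKIF_MAX_SEGMENTS_PER_REQUEST;
	second->sector_number = first->sector_number +
		(BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE) / 512;
}

int main(void)
{
	struct rw_req first  = { .nr_segments = 0, .sector_number = 2048 };
	struct rw_req second = { 0 };

	split_request(&first, &second, 16);     /* 16 segments: 11 + 5 */
	printf("first: %d segs; second: %d segs starting at sector %llu\n",
	       first.nr_segments, second.nr_segments,
	       (unsigned long long)second.sector_number);
	return 0;
}
```

The 44 KB figure in the comment is simply 11 * 4096 bytes, i.e. the data one fully populated request can carry.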
/kernel/linux/linux-6.6/drivers/block/
  xen-blkfront.c
     72  * BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE = 44KB
     75  * should be <= ( 2 * BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE) =
     78  #define HAS_EXTRA_REQ (BLKIF_MAX_SEGMENTS_PER_REQUEST < XEN_PFN_PER_PAGE)
    614  grant_idx >= BLKIF_MAX_SEGMENTS_PER_REQUEST)) {                            (in blkif_setup_rw_req_grant())
    619  grant_idx -= BLKIF_MAX_SEGMENTS_PER_REQUEST;                               (in blkif_setup_rw_req_grant())
    692  first->u.rw.nr_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;                  (in blkif_setup_extra_req())
    694  second->u.rw.nr_segments = nr_segments - BLKIF_MAX_SEGMENTS_PER_REQUEST;   (in blkif_setup_extra_req())
    696  (BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE) / 512;                    (in blkif_setup_extra_req())
    727  if (max_grefs > BLKIF_MAX_SEGMENTS_PER_REQUEST)                            (in blkif_queue_rw_req())
    761  num_grant > BLKIF_MAX_SEGMENTS_PER_REQUEST;                                (in blkif_queue_rw_req())
    [all...]
