Lines Matching refs:ra (mm/readahead.c)

31  * memset *ra to zero.
34 file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
36 ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages;
37 ra->prev_pos = -1;
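
The fragments at 31-37 are file_ra_state_init(), which seeds the per-file readahead state. As a reading aid for the rest of this listing, here is a compilable userspace model of the state those lines touch. The field set follows struct file_ra_state from include/linux/fs.h of this era (the listing matches mm/readahead.c around Linux 5.10/5.11, which the sketches below assume), and all model_* names are mine, not the kernel's:

typedef unsigned long pgoff_t;

struct file_ra_state {
    pgoff_t start;           /* index where the current window starts */
    unsigned int size;       /* pages in the current window */
    unsigned int async_size; /* kick off async readahead when this many
                                pages of the window remain unread */
    unsigned int ra_pages;   /* maximum window size, from the bdi */
    long long prev_pos;      /* byte position of the previous read */
};

/* Mirrors lines 36-37; the caller is assumed to have zeroed *ra,
 * per the comment at line 31. */
static void model_ra_state_init(struct file_ra_state *ra,
                                unsigned int bdi_ra_pages)
{
    ra->ra_pages = bdi_ra_pages; /* line 36: inherited from the backing device */
    ra->prev_pos = -1;           /* line 37: sentinel, no previous read yet */
}
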
275 struct file_ra_state *ra, unsigned long nr_to_read)
290 max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
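
Lines 275 and 290 are from force_page_cache_ra(), the no-heuristics path that reads exactly the requested range. Line 290 lets an oversized request grow past the per-file ra_pages limit up to the device's optimal IO size (bdi->io_pages); the body, reconstructed here from memory and therefore approximate, then splits the read into 2MiB chunks. A sketch with the actual submission reduced to a printf:

#include <stdio.h>

#define MODEL_PAGE_SIZE 4096UL /* assumed page size for the demo */

/* Cap the request at max(io_pages, ra_pages) as on line 290, then
 * submit it in 2MiB pieces. */
static void model_force_ra(unsigned long index, unsigned long nr_to_read,
                           unsigned long io_pages, unsigned long ra_pages)
{
    unsigned long max_pages = io_pages > ra_pages ? io_pages : ra_pages;

    if (nr_to_read > max_pages)
        nr_to_read = max_pages;

    while (nr_to_read) {
        unsigned long this_chunk = (2 * 1024 * 1024) / MODEL_PAGE_SIZE;

        if (this_chunk > nr_to_read)
            this_chunk = nr_to_read;
        /* stands in for do_page_cache_ra(ractl, this_chunk, 0) */
        printf("read %lu pages at index %lu\n", this_chunk, index);
        index += this_chunk;
        nr_to_read -= this_chunk;
    }
}
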
308 * for 128k (32 page) max ra
329 static unsigned long get_next_ra_size(struct file_ra_state *ra,
332 unsigned long cur = ra->size;
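
The comment at line 308 belongs to get_init_ra_size(), which sizes the very first window; lines 329-332 are its sibling get_next_ra_size(), which ramps an established window up between reads. The 5.x body of the latter is short enough to model confidently: quadruple small windows, double medium ones, clamp at the maximum. The main() demo is illustrative only:

#include <stdio.h>

struct file_ra_state { unsigned long size; };

/* Model of get_next_ra_size() (lines 329-332). */
static unsigned long model_next_ra_size(struct file_ra_state *ra,
                                        unsigned long max)
{
    unsigned long cur = ra->size;  /* line 332 */

    if (cur < max / 16)
        return 4 * cur;            /* small window: grow fast */
    if (cur <= max / 2)
        return 2 * cur;            /* medium window: double */
    return max;                    /* otherwise clamp at the device max */
}

int main(void)
{
    struct file_ra_state ra = { .size = 4 };
    unsigned long max = 32;        /* "128k (32 page) max ra", line 308 */

    /* Prints 8 16 32 32: ramp-up saturating at max. */
    for (int i = 0; i < 4; i++) {
        ra.size = model_next_ra_size(&ra, max);
        printf("%lu ", ra.size);
    }
    printf("\n");
    return 0;
}
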
402 struct file_ra_state *ra,
425 ra->start = index;
426 ra->size = min(size + req_size, max);
427 ra->async_size = 1;
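
Lines 425-427 are the tail of try_context_readahead(): if enough recently cached pages sit just before index, the access is treated as part of an interleaved stream, the window is sized from that history plus the current request, and async_size is set to 1 so the next hit re-arms readahead immediately. A model, with count_history_pages() replaced by a caller-supplied count since the page-cache scan cannot be reproduced here; the doubling rule for streams cached from the start of the file follows my reading of the surrounding code:

typedef unsigned long pgoff_t;

struct file_ra_state {
    pgoff_t start;
    unsigned long size;
    unsigned long async_size;
};

static unsigned long min_ul(unsigned long a, unsigned long b)
{
    return a < b ? a : b;
}

/* Model of try_context_readahead(); "history" stands in for
 * count_history_pages(mapping, index, max). Returns 1 when the
 * access looks like a stream worth reading ahead for. */
static int model_try_context_readahead(struct file_ra_state *ra,
                                       pgoff_t index,
                                       unsigned long history,
                                       unsigned long req_size,
                                       unsigned long max)
{
    unsigned long size = history;

    if (!size)          /* no history: probably a random read */
        return 0;

    if (size >= index)  /* cached from the file's start: likely a
                           long-run stream, so be more aggressive */
        size *= 2;

    ra->start = index;                       /* line 425 */
    ra->size = min_ul(size + req_size, max); /* line 426 */
    ra->async_size = 1;                      /* line 427 */
    return 1;
}
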
436 struct file_ra_state *ra, bool hit_readahead_marker,
440 unsigned long max_pages = ra->ra_pages;
462 if ((index == (ra->start + ra->size - ra->async_size) ||
463 index == (ra->start + ra->size))) {
464 ra->start += ra->size;
465 ra->size = get_next_ra_size(ra, max_pages);
466 ra->async_size = ra->size;
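
Lines 436-466 are the heart of ondemand_readahead(). The test at 462-463 detects a sequential continuation: the read landed either on the async marker page (start + size - async_size) or on the first page past the window (start + size). In that case the window slides forward and grows in place rather than being rebuilt from scratch. A model of just this branch:

#include <stdbool.h>

struct file_ra_state {
    unsigned long start, size, async_size;
};

/* Same growth rule as the get_next_ra_size() sketch above. */
static unsigned long model_next_ra_size(struct file_ra_state *ra,
                                        unsigned long max)
{
    unsigned long cur = ra->size;

    if (cur < max / 16)
        return 4 * cur;
    if (cur <= max / 2)
        return 2 * cur;
    return max;
}

/* Model of the sequential-hit branch, lines 462-466. */
static bool model_sequential_hit(struct file_ra_state *ra,
                                 unsigned long index, unsigned long max)
{
    if (index == ra->start + ra->size - ra->async_size ||
        index == ra->start + ra->size) {
        ra->start += ra->size;                  /* line 464: slide forward */
        ra->size = model_next_ra_size(ra, max); /* line 465: and grow */
        ra->async_size = ra->size;              /* line 466: fully async */
        return true;
    }
    return false;
}
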
487 ra->start = start;
488 ra->size = start - index; /* old async_size */
489 ra->size += req_size;
490 ra->size = get_next_ra_size(ra, max_pages);
491 ra->async_size = ra->size;
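
Lines 487-491 handle a readahead-marker hit that arrives with no matching window state, e.g. two streams interleaved on one file descriptor. The kernel scans the page cache for the first missing page after index; the cached run up to it is the old async_size, which plus the request becomes the base for a ramped-up window. A model, with page_cache_next_miss() stubbed as a parameter; the bail-out guard follows my recollection of the surrounding code and should be treated as approximate:

struct file_ra_state {
    unsigned long start, size, async_size;
};

/* One-line stand-in for get_next_ra_size(), sketched earlier. */
static unsigned long model_grow(struct file_ra_state *ra, unsigned long max)
{
    return 2 * ra->size > max ? max : 2 * ra->size;
}

/* Model of lines 487-491; "start" plays the role of the
 * page_cache_next_miss() result, the first uncached index
 * after "index". */
static int model_marker_hit(struct file_ra_state *ra, unsigned long index,
                            unsigned long start, unsigned long req_size,
                            unsigned long max)
{
    if (!start || start - index > max)
        return 0;                   /* no miss found, or run too long */

    ra->start = start;              /* line 487 */
    ra->size = start - index;       /* line 488: the old async_size */
    ra->size += req_size;           /* line 489 */
    ra->size = model_grow(ra, max); /* line 490: ramp it up */
    ra->async_size = ra->size;      /* line 491 */
    return 1;
}
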
506 prev_index = (unsigned long long)ra->prev_pos >> PAGE_SHIFT;
514 if (try_context_readahead(ractl->mapping, ra, index, req_size,
526 ra->start = index;
527 ra->size = get_init_ra_size(req_size, max_pages);
528 ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;
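
When neither branch above fires, ondemand_readahead() consults prev_pos (line 506) for an unaligned sequential pattern and then tries context readahead (line 514) before giving up on history entirely. Lines 526-528 are that cold-start fallback: size the first window from the request and make everything the request did not ask for the async tail, so the PG_readahead marker is planted early. A model; model_init_ra_size() is a deliberately simplified stand-in for get_init_ra_size(), not the kernel's exact power-of-two rules from the comment at line 308:

struct file_ra_state {
    unsigned long start, size, async_size;
};

/* Simplified stand-in for get_init_ra_size(): scale the request up
 * to a first window, never past max. */
static unsigned long model_init_ra_size(unsigned long req_size,
                                        unsigned long max)
{
    unsigned long size = 4 * req_size;

    return size > max ? max : size;
}

/* Model of the cold-start path, lines 526-528. */
static void model_initial_readahead(struct file_ra_state *ra,
                                    unsigned long index,
                                    unsigned long req_size,
                                    unsigned long max)
{
    ra->start = index;
    ra->size = model_init_ra_size(req_size, max);
    /* Pages beyond the request form the async tail; if the request
     * consumed the whole window, re-arm on the entire window. */
    ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;
}
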
537 if (index == ra->start && ra->size == ra->async_size) {
538 add_pages = get_next_ra_size(ra, max_pages);
539 if (ra->size + add_pages <= max_pages) {
540 ra->async_size = add_pages;
541 ra->size += add_pages;
543 ra->size = max_pages;
544 ra->async_size = max_pages >> 1;
548 ractl->_index = ra->start;
549 do_page_cache_ra(ractl, ra->size, ra->async_size);
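
Lines 537-549 finish ondemand_readahead(). If the freshly built window would immediately trip its own marker (the read starts at ra->start and the whole window is async), the would-be next window is merged into this one so a single larger IO is issued; lines 548-549 then point the readahead_control at ra->start and submit via do_page_cache_ra(). A model of the merge step:

struct file_ra_state {
    unsigned long start, size, async_size;
};

/* One-line stand-in for get_next_ra_size(), sketched earlier. */
static unsigned long model_grow(struct file_ra_state *ra, unsigned long max)
{
    return 2 * ra->size > max ? max : 2 * ra->size;
}

/* Model of lines 537-544: fold the next window into the current one
 * instead of taking a second trip through readahead. */
static void model_merge_next_window(struct file_ra_state *ra,
                                    unsigned long index, unsigned long max)
{
    if (index == ra->start && ra->size == ra->async_size) {
        unsigned long add_pages = model_grow(ra, max);

        if (ra->size + add_pages <= max) {
            ra->async_size = add_pages; /* the merged part is the new tail */
            ra->size += add_pages;
        } else {
            ra->size = max;             /* clamp, lines 543-544 */
            ra->async_size = max >> 1;
        }
    }
}
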
553 struct file_ra_state *ra, unsigned long req_count)
563 if (!ra->ra_pages || blk_cgroup_congested()) {
572 force_page_cache_ra(ractl, ra, req_count);
577 ondemand_readahead(ractl, ra, false, req_count);
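
Lines 553-577 are page_cache_sync_ra(), the synchronous entry point called on a page-cache miss. Reconstructed from memory for this kernel era, so treat the details as approximate: when readahead is disabled (ra_pages == 0) or the block cgroup is congested, the request is trimmed to one page and issued through force_page_cache_ra() anyway, since the read itself must still be satisfied; files opened with FMODE_RANDOM also take the forced path; everything else goes to ondemand_readahead() with hit_readahead_marker = false (line 577). A decision model with the kernel-side queries turned into parameters:

#include <stdbool.h>

enum sync_ra_action { RA_SKIP, RA_FORCED, RA_ONDEMAND };

/* Decision model for page_cache_sync_ra(), lines 553-577. */
static enum sync_ra_action model_sync_ra(unsigned int ra_pages,
                                         bool cgroup_congested,
                                         bool fmode_random,
                                         bool have_file,
                                         unsigned long *req_count)
{
    bool do_forced_ra = fmode_random;

    /* Line 563: readahead off or cgroup congested. Still issue the
     * requested range itself, trimmed to one page, if there is a
     * file to issue it against. */
    if (!ra_pages || cgroup_congested) {
        if (!have_file)
            return RA_SKIP;
        *req_count = 1;
        do_forced_ra = true;
    }

    if (do_forced_ra)
        return RA_FORCED;  /* force_page_cache_ra(), line 572 */

    return RA_ONDEMAND;    /* ondemand_readahead(..., false), line 577 */
}
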
582 struct file_ra_state *ra, struct page *page,
586 if (!ra->ra_pages)
607 ondemand_readahead(ractl, ra, true, req_count);
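
Lines 582-607 are page_cache_async_ra(), the asynchronous entry point, called when a read hits a page that still has PG_readahead set. Between the two fragments shown, the function (again from memory, so a hedged reconstruction) bails out if the page is under writeback, because PG_readahead shares its bit with PG_reclaim, clears the marker, and skips the speculative IO when the inode or block cgroup is congested; only then does it call ondemand_readahead() with hit_readahead_marker = true (line 607). As a decision model:

#include <stdbool.h>

enum async_ra_action { ARA_SKIP, ARA_ONDEMAND };

/* Decision model for page_cache_async_ra(), lines 582-607; the
 * page-flag and congestion tests become parameters standing in for
 * PageWriteback()/ClearPageReadahead() and the congestion queries. */
static enum async_ra_action model_async_ra(unsigned int ra_pages,
                                           bool page_writeback,
                                           bool io_congested)
{
    if (!ra_pages)          /* line 586: readahead disabled */
        return ARA_SKIP;

    /* A page under writeback cannot be trusted as a readahead
     * marker: PG_readahead and PG_reclaim share a bit. */
    if (page_writeback)
        return ARA_SKIP;

    /* Dropping speculative IO under congestion is harmless; the
     * data is not needed yet. */
    if (io_congested)
        return ARA_SKIP;

    return ARA_ONDEMAND;    /* ondemand_readahead(..., true), line 607 */
}
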