Lines matching defs:xas — uses of the XArray cursor (struct xa_state). The excerpts below appear to be from the Linux kernel's mm/filemap.c; the leading number on each hit is the source line in that file. Annotated sketches of the recurring patterns follow each group of hits.
129 XA_STATE(xas, &mapping->i_pages, folio->index);
132 mapping_set_update(&xas, mapping);
136 xas_set_order(&xas, folio->index, folio_order(folio));
142 xas_store(&xas, shadow);
143 xas_init_marks(&xas);
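
The five hits above (129-143) are the heart of page_cache_delete(): the cursor is created at the folio's index, widened with xas_set_order() to cover every slot a large folio occupies, and a single xas_store() replaces the folio with a shadow entry (or NULL); xas_init_marks() then clears any dirty/writeback tags left behind. The real function also wires up workingset shadow accounting via mapping_set_update(). A minimal sketch of the same pattern, assuming the caller already holds the i_pages lock (the function name and the shadow argument are illustrative; later sketches assume the same kernel headers and context):

#include <linux/pagemap.h>	/* folios, address_space, XArray */
#include <linux/pagevec.h>	/* folio_batch, used further below */

/* Hypothetical helper: replace @folio with @shadow in the page cache.
 * The caller must hold the xa_lock on mapping->i_pages. */
static void replace_with_shadow(struct address_space *mapping,
				struct folio *folio, void *shadow)
{
	XA_STATE(xas, &mapping->i_pages, folio->index);

	/* Widen the store to span all 2^order slots of the folio. */
	xas_set_order(&xas, folio->index, folio_order(folio));
	xas_store(&xas, shadow);	/* one store updates the whole range */
	xas_init_marks(&xas);		/* reset DIRTY/WRITEBACK/TOWRITE tags */
}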
282 XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index);
287 mapping_set_update(&xas, mapping);
288 xas_for_each(&xas, folio, ULONG_MAX) {
314 xas_store(&xas, NULL);
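
Hits 282-314 show the batch variant, page_cache_delete_batch(): one forward walk with xas_for_each() starting at the first folio's index, erasing each matching entry by storing NULL over it. A reduced sketch (the real function's accounting and ordering checks are omitted; names illustrative):

/* Erase every page-cache entry that matches a folio in @fbatch.
 * Caller holds the i_pages lock. */
static void erase_batch(struct address_space *mapping,
			struct folio_batch *fbatch)
{
	XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index);
	struct folio *folio;
	unsigned int i = 0;

	xas_for_each(&xas, folio, ULONG_MAX) {
		if (i >= folio_batch_count(fbatch))
			break;
		if (xa_is_value(folio))		/* skip shadow entries */
			continue;
		if (folio != fbatch->folios[i])
			continue;		/* not one of ours */
		xas_store(&xas, NULL);		/* storing NULL erases */
		i++;
	}
}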
476 XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
484 folio = xas_find(&xas, max);
485 if (xas_retry(&xas, folio))
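
Hits 476-485 are a lockless presence test in the style of filemap_range_has_page(): a single xas_find() under RCU, looped on xas_retry(), which both recognizes a retry entry left by a concurrent modification and resets the cursor so the next find restarts cleanly. Hedged sketch:

/* Return true if any entry exists in [start, max]. Illustrative. */
static bool range_has_entry(struct address_space *mapping,
			    pgoff_t start, pgoff_t max)
{
	XA_STATE(xas, &mapping->i_pages, start);
	void *entry;

	rcu_read_lock();
	do {
		entry = xas_find(&xas, max);
		/* true for a retry entry; also resets the walk */
	} while (xas_retry(&xas, entry));
	rcu_read_unlock();

	return entry != NULL;
}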
634 XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
642 xas_for_each(&xas, folio, max) {
643 if (xas_retry(&xas, folio))
812 XA_STATE(xas, &mapping->i_pages, offset);
824 xas_lock_irq(&xas);
825 xas_store(&xas, new);
837 xas_unlock_irq(&xas);
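
Hits 812-837 (replace_page_cache_folio()) bracket a single store with xas_lock_irq()/xas_unlock_irq(), the xa_state wrappers for the array's internal spinlock. Sketch of swapping the entry at a fixed offset (the real function also transfers counters and wakes waiters):

/* Swap in @new at @offset. Illustrative. */
static void swap_entry(struct address_space *mapping, pgoff_t offset,
		       struct folio *new)
{
	XA_STATE(xas, &mapping->i_pages, offset);

	xas_lock_irq(&xas);	/* takes mapping->i_pages.xa_lock, irq-safe */
	xas_store(&xas, new);
	xas_unlock_irq(&xas);
}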
847 XA_STATE(xas, &mapping->i_pages, index);
854 mapping_set_update(&xas, mapping);
862 xas_set_order(&xas, index, folio_order(folio));
869 folio->index = xas.xa_index;
872 unsigned int order = xa_get_order(xas.xa, xas.xa_index);
876 xas_split_alloc(&xas, xa_load(xas.xa, xas.xa_index),
878 xas_lock_irq(&xas);
879 xas_for_each_conflict(&xas, entry) {
882 xas_set_err(&xas, -EEXIST);
891 order = xa_get_order(xas.xa, xas.xa_index);
895 xas_split(&xas, old, order);
896 xas_reset(&xas);
900 xas_store(&xas, folio);
901 if (xas_error(&xas))
914 xas_unlock_irq(&xas);
915 } while (xas_nomem(&xas, gfp));
917 if (xas_error(&xas))
928 return xas_error(&xas);
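
Hits 847-928 are __filemap_add_folio(), the canonical XArray insertion loop: take the lock, scan would-be conflicts with xas_for_each_conflict(), store, unlock, then let xas_nomem() allocate outside the lock and retry if the store failed with -ENOMEM. The real function additionally splits an existing higher-order shadow entry with xas_split_alloc()/xas_split() (hits 876-895), which this sketch omits:

/* Insert @folio at @index, failing with -EEXIST if a folio is
 * already present. Illustrative reduction of the kernel pattern. */
static int insert_folio(struct address_space *mapping, struct folio *folio,
			pgoff_t index, gfp_t gfp)
{
	XA_STATE(xas, &mapping->i_pages, index);
	void *entry;

	xas_set_order(&xas, index, folio_order(folio));

	do {
		xas_lock_irq(&xas);
		/* Anything but a shadow value in our range is a conflict. */
		xas_for_each_conflict(&xas, entry) {
			if (!xa_is_value(entry)) {
				xas_set_err(&xas, -EEXIST);
				break;
			}
		}
		if (!xas_error(&xas))
			xas_store(&xas, folio);	/* may set -ENOMEM */
		xas_unlock_irq(&xas);
		/* On -ENOMEM, allocate (may sleep) and go round again. */
	} while (xas_nomem(&xas, gfp));

	return xas_error(&xas);
}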
1734 XA_STATE(xas, &mapping->i_pages, index);
1737 void *entry = xas_next(&xas);
1740 if (xas.xa_index == 0)
1744 return xas.xa_index;
1770 XA_STATE(xas, &mapping->i_pages, index);
1773 void *entry = xas_prev(&xas);
1776 if (xas.xa_index == ULONG_MAX)
1780 return xas.xa_index;
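
Hits 1734-1780 are the paired gap finders page_cache_next_miss() and page_cache_prev_miss(): xas_next()/xas_prev() step one index at a time, a NULL or shadow entry marks the hole, and xa_index wrapping to 0 (forward) or ULONG_MAX (backward) signals that the walk fell off the end of the index space. Sketch of the forward direction:

/* First hole at or after @index, scanning at most @max_scan slots.
 * Shadow entries count as holes. Illustrative. */
static pgoff_t next_hole(struct address_space *mapping,
			 pgoff_t index, unsigned long max_scan)
{
	XA_STATE(xas, &mapping->i_pages, index);

	rcu_read_lock();
	while (max_scan--) {
		void *entry = xas_next(&xas);

		if (!entry || xa_is_value(entry))
			break;			/* found the hole */
		if (xas.xa_index == 0)
			break;			/* wrapped: ran off the end */
	}
	rcu_read_unlock();

	return xas.xa_index;
}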
1818 XA_STATE(xas, &mapping->i_pages, index);
1823 xas_reset(&xas);
1824 folio = xas_load(&xas);
1825 if (xas_retry(&xas, folio))
1837 if (unlikely(folio != xas_reload(&xas))) {
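
Hits 1818-1837 (filemap_get_entry()) show the standard RCU lookup dance: load the entry, take a speculative reference, then xas_reload() to confirm the slot still points at the same folio, since a reference alone doesn't stop the folio being removed or split between the load and the get. Sketch:

/* RCU-safe page-cache lookup. Illustrative. */
static struct folio *lookup_folio(struct address_space *mapping, pgoff_t index)
{
	XA_STATE(xas, &mapping->i_pages, index);
	struct folio *folio;

	rcu_read_lock();
repeat:
	xas_reset(&xas);
	folio = xas_load(&xas);
	if (xas_retry(&xas, folio))
		goto repeat;
	if (!folio || xa_is_value(folio)) {
		folio = NULL;			/* absent, or only a shadow */
		goto out;
	}
	if (!folio_try_get(folio))		/* refcount may be frozen */
		goto repeat;
	/* Has the folio been removed or replaced since xas_load()? */
	if (unlikely(folio != xas_reload(&xas))) {
		folio_put(folio);
		goto repeat;
	}
out:
	rcu_read_unlock();
	return folio;
}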
1969 static inline struct folio *find_get_entry(struct xa_state *xas, pgoff_t max,
1976 folio = xas_find(xas, max);
1978 folio = xas_find_marked(xas, max, mark);
1980 if (xas_retry(xas, folio))
1993 if (unlikely(folio != xas_reload(xas))) {
2000 xas_reset(xas);
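
The static helper at 1969, find_get_entry(), picks xas_find() when asked for XA_PRESENT (any entry) and xas_find_marked() for a specific tag, then repeats the get/reload verification above, falling back to xas_reset() when it loses a race. The macro form of the marked search is xas_for_each_marked(); a sketch that counts dirty folios in a range:

/* Count present folios tagged dirty in [start, end]. Illustrative. */
static unsigned int count_dirty(struct address_space *mapping,
				pgoff_t start, pgoff_t end)
{
	XA_STATE(xas, &mapping->i_pages, start);
	struct folio *folio;
	unsigned int count = 0;

	rcu_read_lock();
	xas_for_each_marked(&xas, folio, end, PAGECACHE_TAG_DIRTY) {
		if (xas_retry(&xas, folio))
			continue;
		if (!xa_is_value(folio))
			count++;
	}
	rcu_read_unlock();

	return count;
}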
2027 XA_STATE(xas, &mapping->i_pages, *start);
2031 while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) {
2032 indices[fbatch->nr] = xas.xa_index;
2073 XA_STATE(xas, &mapping->i_pages, *start);
2077 while ((folio = find_get_entry(&xas, end, XA_PRESENT))) {
2088 VM_BUG_ON_FOLIO(!folio_contains(folio, xas.xa_index),
2091 indices[fbatch->nr] = xas.xa_index;
2138 XA_STATE(xas, &mapping->i_pages, *start);
2142 while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) {
2191 XA_STATE(xas, &mapping->i_pages, *start);
2197 for (folio = xas_load(&xas); folio && xas.xa_index <= end;
2198 folio = xas_next(&xas)) {
2199 if (xas_retry(&xas, folio))
2211 if (unlikely(folio != xas_reload(&xas)))
2227 xas_reset(&xas);
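
Hits 2191-2227 are filemap_get_folios_contig(), which wants an unbroken run: it seeds the walk with xas_load() and advances with xas_next() so any hole ends the loop, and on a lost race it parks with xas_reset() so the retried step re-reads the same index. Sketch (the try-get/reload verification shown earlier is elided here for brevity; the real code needs it):

/* Gather folios from @start until a hole or @end. Illustrative. */
static unsigned int gather_contig(struct address_space *mapping,
				  pgoff_t start, pgoff_t end,
				  struct folio_batch *fbatch)
{
	XA_STATE(xas, &mapping->i_pages, start);
	struct folio *folio;

	rcu_read_lock();
	for (folio = xas_load(&xas); folio && xas.xa_index <= end;
	     folio = xas_next(&xas)) {
		if (xas_retry(&xas, folio))
			continue;
		if (xa_is_value(folio))
			break;		/* shadow entry breaks the run */
		/* ... folio_try_get() + xas_reload() check here ... */
		if (!folio_batch_add(fbatch, folio))
			break;		/* batch full */
	}
	rcu_read_unlock();

	return folio_batch_count(fbatch);
}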
2262 XA_STATE(xas, &mapping->i_pages, *start);
2266 while ((folio = find_get_entry(&xas, end, tag)) != NULL) {
2332 XA_STATE(xas, &mapping->i_pages, index);
2336 for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) {
2337 if (xas_retry(&xas, folio))
2339 if (xas.xa_index > max || xa_is_value(folio))
2346 if (unlikely(folio != xas_reload(&xas)))
2355 xas_advance(&xas, folio_next_index(folio) - 1);
2360 xas_reset(&xas);
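
Hits 2332-2360 (filemap_get_read_batch()) add one more cursor trick: after consuming a large folio, xas_advance() jumps the cursor to the folio's last slot, so the loop's xas_next() lands on the first index past it instead of revisiting the folio once per tail slot. Sketch (reference counting elided again):

/* Visit each folio once, even multi-page ones. Illustrative. */
static void visit_batch(struct address_space *mapping,
			pgoff_t index, pgoff_t max)
{
	XA_STATE(xas, &mapping->i_pages, index);
	struct folio *folio;

	rcu_read_lock();
	for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) {
		if (xas_retry(&xas, folio))
			continue;
		if (xas.xa_index > max || xa_is_value(folio))
			break;
		/* ... use the folio ... */
		/* Skip the folio's remaining slots in one step. */
		xas_advance(&xas, folio_next_index(folio) - 1);
	}
	rcu_read_unlock();
}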
2990 static inline loff_t folio_seek_hole_data(struct xa_state *xas,
3002 xas_pause(xas);
3023 static inline size_t seek_folio_size(struct xa_state *xas, struct folio *folio)
3026 return PAGE_SIZE << xa_get_order(xas->xa, xas->xa_index);
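
The xas_pause() at 3002 parks the cursor before folio_seek_hole_data() drops RCU to lock a folio, so the walk can resume safely afterwards. seek_folio_size() at 3023-3026 answers "how many bytes does this entry cover?": for a present folio that is just its size, but a shadow (value) entry carries no folio, so the span must be read back out of the tree with xa_get_order(). Reconstructed shape (close to the helper itself):

/* Bytes spanned by the entry under the cursor. Illustrative. */
static size_t entry_size(struct xa_state *xas, struct folio *folio)
{
	if (xa_is_value(folio))
		return PAGE_SIZE << xa_get_order(xas->xa, xas->xa_index);
	return folio_size(folio);
}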
3051 XA_STATE(xas, &mapping->i_pages, start >> PAGE_SHIFT);
3060 while ((folio = find_get_entry(&xas, max, XA_PRESENT))) {
3061 loff_t pos = (u64)xas.xa_index << PAGE_SHIFT;
3070 seek_size = seek_folio_size(&xas, folio);
3072 start = folio_seek_hole_data(&xas, mapping, folio, start, pos,
3079 xas_set(&xas, pos >> PAGE_SHIFT);
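
The driver loop at 3051-3079 (mapping_seek_hole_data()) shows xas_set(): after deciding how far a folio or shadow entry reaches, it converts the byte position back to an index and re-aims the cursor so the next xas_find() resumes exactly there. Skeleton of the loop's shape (the hole/data decision itself is omitted):

/* Walk [start, end) entry by entry, skipping each one wholesale.
 * Illustrative skeleton only. */
static loff_t seek_shape(struct address_space *mapping,
			 loff_t start, loff_t end)
{
	XA_STATE(xas, &mapping->i_pages, start >> PAGE_SHIFT);
	pgoff_t max = (end - 1) >> PAGE_SHIFT;
	struct folio *folio;
	loff_t pos = start;

	rcu_read_lock();
	while ((folio = xas_find(&xas, max))) {
		if (xas_retry(&xas, folio))
			continue;

		pos = (u64)xas.xa_index << PAGE_SHIFT;
		/* ... inspect the entry; return pos on a match ... */

		/* Step past the whole entry, then re-aim the cursor. */
		pos += PAGE_SIZE << xa_get_order(xas.xa, xas.xa_index);
		xas_set(&xas, pos >> PAGE_SHIFT);
	}
	rcu_read_unlock();

	return pos;
}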
3440 static struct folio *next_uptodate_folio(struct xa_state *xas,
3443 struct folio *folio = xas_next_entry(xas, end_pgoff);
3449 if (xas_retry(xas, folio))
3458 if (unlikely(folio != xas_reload(xas)))
3469 if (xas->xa_index >= max_idx)
3476 } while ((folio = xas_next_entry(xas, end_pgoff)) != NULL);
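
next_uptodate_folio() at 3440-3476 continues a walk across calls: xas_next_entry() is the cheap inline successor to xas_find() that keeps going from wherever the cursor stopped, and it degrades to a full xas_find() automatically after xas_retry() resets the state. Reduced sketch (the uptodate/trylock screening of the real helper is omitted):

/* Next present, referenced folio up to @end, or NULL. Illustrative. */
static struct folio *next_present_folio(struct xa_state *xas, pgoff_t end)
{
	struct folio *folio = xas_next_entry(xas, end);

	do {
		if (!folio)
			return NULL;
		if (xas_retry(xas, folio))
			continue;
		if (xa_is_value(folio))
			continue;		/* shadow entry */
		if (!folio_try_get(folio))
			continue;
		if (unlikely(folio != xas_reload(xas))) {
			folio_put(folio);
			continue;
		}
		return folio;
	} while ((folio = xas_next_entry(xas, end)) != NULL);

	return NULL;
}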
3575 XA_STATE(xas, &mapping->i_pages, start_pgoff);
3581 folio = next_uptodate_folio(&xas, mapping, end_pgoff);
3600 addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
3601 vmf->pte += xas.xa_index - last_pgoff;
3602 last_pgoff = xas.xa_index;
3604 nr_pages = min(end, end_pgoff) - xas.xa_index + 1;
3611 xas.xa_index - folio->index, addr,
3616 } while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);
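
The fault-around loop at 3575-3616 consumes that helper and shows why xa_index matters: because the walk can skip holes, the target address and PTE pointer advance by the index delta since the last iteration rather than by a fixed page. A self-contained sketch of the delta idiom (the vm_fault plumbing of filemap_map_pages() is deliberately left out; names are illustrative):

/* Advance a virtual address in lockstep with a sparse index walk. */
static void walk_with_deltas(struct address_space *mapping,
			     pgoff_t start, pgoff_t end, unsigned long base)
{
	XA_STATE(xas, &mapping->i_pages, start);
	struct folio *folio;
	pgoff_t last = start;
	unsigned long addr = base;

	rcu_read_lock();
	xas_for_each(&xas, folio, end) {
		if (xas_retry(&xas, folio))
			continue;
		if (xa_is_value(folio))
			continue;
		/* Holes were skipped: move by the index delta, not by 1. */
		addr += (xas.xa_index - last) << PAGE_SHIFT;
		last = xas.xa_index;
		/* ... map the folio's pages at addr ... */
	}
	rcu_read_unlock();
}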
4157 XA_STATE(xas, &mapping->i_pages, first_index);
4161 xas_for_each(&xas, folio, last_index) {
4177 if (xas_retry(&xas, folio))
4180 order = xa_get_order(xas.xa, xas.xa_index);
4182 folio_first_index = round_down(xas.xa_index, 1 << order);
4216 if (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY))
4219 if (xas_get_mark(&xas, PAGECACHE_TAG_WRITEBACK))
4224 xas_pause(&xas);
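
The last group (4157-4224, filemap_cachestat()) reads per-entry state without touching the folios: xas_get_mark() queries the dirty/writeback tags stored in the tree itself, xa_get_order() plus round_down() (hits 4180-4182) recovers a large entry's first index, and xas_pause() parks the cursor so the walk can drop RCU and reschedule before resuming. Sketch of a mark-reading scan with a resched point:

/* Tally dirty/writeback tags over [first, last]. Illustrative. */
static void scan_marks(struct address_space *mapping,
		       pgoff_t first, pgoff_t last,
		       unsigned long *dirty, unsigned long *writeback)
{
	XA_STATE(xas, &mapping->i_pages, first);
	struct folio *folio;

	rcu_read_lock();
	xas_for_each(&xas, folio, last) {
		if (xas_retry(&xas, folio))
			continue;
		if (xa_is_value(folio))
			continue;	/* evicted; only a shadow remains */

		/* Tags live in the tree: no folio lock or ref needed. */
		if (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY))
			(*dirty)++;
		if (xas_get_mark(&xas, PAGECACHE_TAG_WRITEBACK))
			(*writeback)++;

		/* Park the cursor, breathe, resume where we left off. */
		xas_pause(&xas);
		rcu_read_unlock();
		cond_resched();
		rcu_read_lock();
	}
	rcu_read_unlock();
}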