Lines matching refs:walker. Each match below is prefixed with its line number in the searched file, which the identifiers (kvm_pgtable_walk, kvm_dereference_pteref, the stage-2 walkers) place in the Linux kernel's arm64 KVM page-table code, arch/arm64/kvm/hyp/pgtable.c.
63 struct kvm_pgtable_walker *walker;
179 struct kvm_pgtable_walker *walker = data->walker;
183 return walker->cb(ctx, visit);
186 static bool kvm_pgtable_walk_continue(const struct kvm_pgtable_walker *walker,
200 return !(walker->flags & KVM_PGTABLE_WALK_HANDLE_FAULT);
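The check at line 200 is the fault-retry convention in miniature. A minimal sketch of the surrounding kvm_pgtable_walk_continue() logic, assuming (as in mainline) that -EAGAIN is the visitor's "lost a race on a PTE" signal:

    static bool walk_continue_sketch(const struct kvm_pgtable_walker *walker,
                                     int r)
    {
            /*
             * Assumption: a visitor returns -EAGAIN after losing a race to
             * update a PTE. Walkers servicing a guest fault
             * (KVM_PGTABLE_WALK_HANDLE_FAULT) must see that error so the
             * vCPU can retry the access; any other walker keeps going.
             */
            if (r == -EAGAIN)
                    return !(walker->flags & KVM_PGTABLE_WALK_HANDLE_FAULT);

            /* Any other non-zero return value aborts the walk. */
            return !r;
    }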
212 enum kvm_pgtable_walk_flags flags = data->walker->flags;
213 kvm_pte_t *ptep = kvm_dereference_pteref(data->walker, pteref);
217 .arg = data->walker->arg,
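Lines 179 through 217 show the dispatch path: the walk data carries a pointer back to its walker, the visitor context inherits the walker's arg cookie, and line 183 invokes the callback with that context plus a visit code. A skeleton visitor matching the contract those lines imply (example_visitor and its cookie type are invented names; the signature follows the call at line 183):

    /*
     * Hypothetical visitor. ctx->arg is the cookie installed at line 217;
     * visit identifies the traversal event (a leaf entry, or a table entry
     * before/after its children) that triggered the call.
     */
    static int example_visitor(const struct kvm_pgtable_visit_ctx *ctx,
                               enum kvm_pgtable_walk_flags visit)
    {
            struct example_cookie *cookie = ctx->arg;       /* hypothetical */

            /*
             * Return 0 to continue; other values feed the
             * kvm_pgtable_walk_continue() logic sketched above.
             */
            return 0;
    }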
241 * Reload the page table after invoking the walker callback for leaf
242 * entries or after pre-order traversal, to allow the walker to descend
250 if (!kvm_pgtable_walk_continue(data->walker, ret))
261 if (!kvm_pgtable_walk_continue(data->walker, ret))
268 if (kvm_pgtable_walk_continue(data->walker, ret))
321 struct kvm_pgtable_walker *walker)
327 .walker = walker,
331 r = kvm_pgtable_walk_begin(walker);
336 kvm_pgtable_walk_end(walker);
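Lines 321 through 336 are the funnel for nearly every match below: callers build a kvm_pgtable_walker on the stack, and kvm_pgtable_walk() brackets the traversal with kvm_pgtable_walk_begin()/kvm_pgtable_walk_end(). A minimal, hypothetical leaf-counting walker in exactly that shape (count_leaf_cb and count_leaves are invented; the struct fields and the KVM_PGTABLE_WALK_LEAF flag are the ones this file uses):

    static int count_leaf_cb(const struct kvm_pgtable_visit_ctx *ctx,
                             enum kvm_pgtable_walk_flags visit)
    {
            int *count = ctx->arg;

            (*count)++;
            return 0;
    }

    static int count_leaves(struct kvm_pgtable *pgt, u64 addr, u64 size,
                            int *count)
    {
            struct kvm_pgtable_walker walker = {
                    .cb     = count_leaf_cb,
                    .flags  = KVM_PGTABLE_WALK_LEAF,
                    .arg    = count,
            };

            return kvm_pgtable_walk(pgt, addr, size, &walker);
    }

The walkers at lines 361, 491, 550 and onward all follow this template; only .cb, .flags and .arg vary.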
361 struct kvm_pgtable_walker walker = {
369 PAGE_SIZE, &walker);
491 struct kvm_pgtable_walker walker = {
501 ret = kvm_pgtable_walk(pgt, addr, size, &walker);
550 struct kvm_pgtable_walker walker = {
559 kvm_pgtable_walk(pgt, addr, size, &walker);
599 struct kvm_pgtable_walker walker = {
604 WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
605 pgt->mm_ops->put_page(kvm_dereference_pteref(&walker, pgt->pgd));
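Lines 599 through 605 show the hyp teardown pattern, and also why kvm_dereference_pteref() takes the walker itself as an argument: whether a pteref may be loaded plainly or must go through rcu_dereference() depends on whether that particular walk is shared. A sketch of the teardown shape under that reading (free_walker is a hypothetical callback; the full-address-space walk and the final put_page mirror lines 604 and 605):

    static void destroy_sketch(struct kvm_pgtable *pgt)
    {
            struct kvm_pgtable_walker walker = {
                    .cb     = free_walker,          /* hypothetical */
                    .flags  = KVM_PGTABLE_WALK_LEAF |
                              KVM_PGTABLE_WALK_TABLE_POST,
            };

            /* Visit the whole input-address space, freeing as we go. */
            WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));

            /* The walker doubles as the RCU token for the final PGD load. */
            pgt->mm_ops->put_page(kvm_dereference_pteref(&walker, pgt->pgd));
    }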
793 * Should never occur if this walker has exclusive access to the
842 * pointless when the unmap walker needs to perform CMOs.
887 * walker using an invalid PA. Avoid offsetting an already invalid PA,
1045 struct kvm_pgtable_walker walker = {
1060 ret = kvm_pgtable_walk(pgt, addr, size, &walker);
1076 struct kvm_pgtable_walker walker = {
1086 ret = kvm_pgtable_walk(pgt, addr, size, &walker);
1136 struct kvm_pgtable_walker walker = {
1142 ret = kvm_pgtable_walk(pgt, addr, size, &walker);
1205 struct kvm_pgtable_walker walker = {
1211 ret = kvm_pgtable_walk(pgt, addr, size, &walker);
1285 struct kvm_pgtable_walker walker = {
1291 WARN_ON(kvm_pgtable_walk(pgt, addr, size, &walker));
1339 struct kvm_pgtable_walker walker = {
1348 return kvm_pgtable_walk(pgt, addr, size, &walker);
1362 struct kvm_pgtable_walker walker = {
1375 .walker = &walker,
1499 struct kvm_pgtable_walker walker = {
1505 return kvm_pgtable_walk(pgt, addr, size, &walker);
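The stage-2 walkers from line 1045 through line 1505 reuse the template above; what changes is the flag set, in particular whether a walk may run concurrently with other vCPUs. A hedged sketch of a fault-path walker combining the flags this listing surfaces (stage2_fault_cb and handle_fault_sketch are invented; KVM_PGTABLE_WALK_LEAF, KVM_PGTABLE_WALK_SHARED and KVM_PGTABLE_WALK_HANDLE_FAULT are the real flags):

    static int handle_fault_sketch(struct kvm_pgtable *pgt, u64 fault_ipa,
                                   void *fault_data)
    {
            struct kvm_pgtable_walker walker = {
                    .cb     = stage2_fault_cb,      /* hypothetical */
                    .arg    = fault_data,
                    .flags  = KVM_PGTABLE_WALK_LEAF |
                              KVM_PGTABLE_WALK_SHARED |       /* RCU, not the write lock */
                              KVM_PGTABLE_WALK_HANDLE_FAULT,  /* surface -EAGAIN */
            };

            /* -EAGAIN here means "lost a race, let the vCPU retry". */
            return kvm_pgtable_walk(pgt, fault_ipa, PAGE_SIZE, &walker);
    }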
1531 /* Ensure zeroed PGD pages are visible to the hardware walker */
1564 struct kvm_pgtable_walker walker = {
1570 WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
1572 pgt->mm_ops->free_pages_exact(kvm_dereference_pteref(&walker, pgt->pgd), pgd_sz);
1579 struct kvm_pgtable_walker walker = {
1585 .walker = &walker,
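The last two matches, at lines 1579 and 1585, differ from everything above: a kvm_pgtable_walk_data is assembled by hand with .walker = &walker, which is the pattern for walking a subtree that has already been unlinked from a live page table, where no struct kvm_pgtable exists to hand to kvm_pgtable_walk(). A sketch under that assumption; __kvm_pgtable_walk() is internal to this file, its exact signature (notably the type of level) has changed across kernel versions, and free_unlinked_cb is invented:

    static void free_unlinked_sketch(struct kvm_pgtable_mm_ops *mm_ops,
                                     void *pgtable, u32 level)
    {
            kvm_pteref_t ptep = (kvm_pteref_t)pgtable;
            struct kvm_pgtable_walker walker = {
                    .cb     = free_unlinked_cb,     /* hypothetical */
                    .flags  = KVM_PGTABLE_WALK_LEAF |
                              KVM_PGTABLE_WALK_TABLE_POST,
            };
            struct kvm_pgtable_walk_data data = {
                    .walker = &walker,
                    /*
                     * A detached table has no meaningful IA range; any range
                     * spanning the whole subtree will do.
                     */
                    .addr   = 0,
                    .end    = kvm_granule_size(level),
            };

            WARN_ON(__kvm_pgtable_walk(&data, mm_ops, ptep, level + 1));
    }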