Lines Matching defs:cache
102 struct kvm_mmu_memory_cache *cache;
110 cache = &kvm->arch.mmu.split_page_cache;
111 return kvm_mmu_memory_cache_nr_free_objects(cache) < min;
117 struct kvm_mmu_memory_cache *cache;
130 cache = &kvm->arch.mmu.split_page_cache;
137 ret = __kvm_mmu_topup_memory_cache(cache,
150 ret = kvm_pgtable_stage2_split(pgt, addr, next - addr, cache);
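Lines 102-150 above appear to come from the eager page splitting path: a per-VM kvm_mmu_memory_cache is topped up with page-table pages and then handed to kvm_pgtable_stage2_split() for one chunk of the range at a time. A condensed sketch of that pattern follows; locking, rescheduling and chunk sizing are elided, and names such as split_huge_pages_sketch and the SZ_2M chunk size are illustrative, not taken from the listing.

static int split_huge_pages_sketch(struct kvm *kvm, phys_addr_t addr,
                                   phys_addr_t end, int cache_capacity)
{
        struct kvm_mmu_memory_cache *cache = &kvm->arch.mmu.split_page_cache;
        struct kvm_pgtable *pgt = kvm->arch.mmu.pgt;
        phys_addr_t next;
        int ret = 0;

        do {
                /* Refill the cache whenever it runs low on page-table pages. */
                if (kvm_mmu_memory_cache_nr_free_objects(cache) < cache_capacity) {
                        ret = __kvm_mmu_topup_memory_cache(cache, cache_capacity,
                                                           cache_capacity);
                        if (ret)
                                break;
                }

                /* Split at most one chunk of block mappings per iteration. */
                next = min(addr + SZ_2M, end);  /* chunk size is a stand-in */
                ret = kvm_pgtable_stage2_split(pgt, addr, next - addr, cache);
                if (ret)
                        break;
        } while (addr = next, addr != end);

        return ret;
}

Topping the cache up to full capacity before each chunk means kvm_pgtable_stage2_split() never has to allocate in the middle of a walk.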
287 * bypass the data cache and go directly to RAM. However, the CPUs
288 * can still speculate reads (not writes) and fill cache lines with
291 * Those cache lines will be *clean* cache lines though, so a
293 * operation, because no cache lines are marked dirty.
295 * Those clean cache lines could be filled prior to an uncached write
296 * by the guest, and the cache coherent IO subsystem would therefore
301 * never hit in the cache.
346 * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
349 * Go through the stage 2 page tables and invalidate any cache lines
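Going by the kerneldoc at lines 346-349, stage2_flush_vm() walks the stage 2 page tables and performs cache maintenance on everything already mapped into the guest. A minimal per-memslot sketch of what such a walk might delegate to, assuming the generic kvm_pgtable_stage2_flush() walker (the helper name here is illustrative):

/* Sketch: cache maintenance over the IPA range backing one memslot. */
static void flush_memslot_sketch(struct kvm *kvm,
                                 struct kvm_memory_slot *memslot)
{
        phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
        phys_addr_t size = PAGE_SIZE * memslot->npages;

        kvm_pgtable_stage2_flush(kvm->arch.mmu.pgt, addr, size);
}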
1069 struct kvm_mmu_memory_cache cache = { .gfp_zero = __GFP_ZERO };
1082 ret = kvm_mmu_topup_memory_cache(&cache,
1089 &cache, 0);
1097 kvm_mmu_free_memory_cache(&cache);
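Lines 1069-1097 show the other common shape: a short-lived kvm_mmu_memory_cache lives on the stack, is topped up before each map call so the page-table code never has to allocate under the lock, and is freed once the loop finishes. A stripped-down sketch of that pattern; the function name, the device-memory prot flags and the pre-allocation count of 4 are illustrative choices, not taken from the listing.

static int map_device_range_sketch(struct kvm *kvm, phys_addr_t ipa,
                                   phys_addr_t pa, unsigned long size)
{
        struct kvm_mmu_memory_cache cache = { .gfp_zero = __GFP_ZERO };
        enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_DEVICE |
                                     KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W;
        phys_addr_t addr;
        int ret = 0;

        for (addr = ipa; addr < ipa + size; addr += PAGE_SIZE) {
                /* Keep a few page-table pages pre-allocated; 4 is a stand-in. */
                ret = kvm_mmu_topup_memory_cache(&cache, 4);
                if (ret)
                        break;

                write_lock(&kvm->mmu_lock);
                ret = kvm_pgtable_stage2_map(kvm->arch.mmu.pgt, addr, PAGE_SIZE,
                                             pa, prot, &cache, 0);
                write_unlock(&kvm->mmu_lock);
                if (ret)
                        break;

                pa += PAGE_SIZE;
        }

        kvm_mmu_free_memory_cache(&cache);
        return ret;
}

Freeing the cache unconditionally at the end mirrors line 1097: anything topped up but never consumed goes back to the allocator.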
1723 * Check for a cache maintenance operation. Since we
1803 * the cache to the PoC.
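Line 1723 sits in the abort-handling path: a data abort that is really a cache maintenance operation on an address with no memslot behind it cannot hit in the cache, so the usual response is to skip the instruction. A hedged sketch of that check, using existing ESR-based helpers; the wrapper name is made up for illustration, and in the real flow the check presumably lives inline.

static int handle_cmo_outside_memslot(struct kvm_vcpu *vcpu, unsigned long hva)
{
        if (kvm_is_error_hva(hva) && kvm_vcpu_dabt_is_cm(vcpu)) {
                /* The range cannot be cached, so just step over the CMO. */
                kvm_incr_pc(vcpu);
                return 1;       /* handled, resume the guest */
        }

        return 0;               /* fall through to normal abort handling */
}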
1998 * Free any leftovers from the eager page splitting cache. Do
2002 * cache around for any of the other cases.
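Lines 1998-2002 are about tearing the split cache back down; presumably that amounts to a single free of the per-VM cache named at line 110. A one-line sketch:

        /* Return any unused page-table pages held by the eager split cache. */
        kvm_mmu_free_memory_cache(&kvm->arch.mmu.split_page_cache);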
2102 * S/W to PA mapping, it can only use S/W to nuke the whole cache,
2106 * usage of the cache maintenance instructions that operate by set/way
2107 * is associated with the cache maintenance instructions associated
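The comment at lines 2102-2107 describes the set/way problem: the guest cannot learn the set/way to PA mapping, so a trapped S/W operation can only mean "clean/invalidate everything", which KVM services by VA instead. A rough sketch of how that could be acted on, assuming the stage2_flush_vm() routine documented at line 346 and a hypothetical handler name; the real logic also involves tracing and the cache on/off tracking that HCR_EL2.TVM trapping provides.

/* Sketch (hypothetical helper): first trapped S/W op flushes the whole VM. */
static void handle_set_way_cmo(struct kvm_vcpu *vcpu)
{
        unsigned long hcr = *vcpu_hcr(vcpu);

        if (!(hcr & HCR_TVM)) {
                stage2_flush_vm(vcpu->kvm);     /* "nuke the whole cache" by VA */
                *vcpu_hcr(vcpu) = hcr | HCR_TVM; /* trap further VM register writes */
        }
}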