Lines matching refs:tlb (mm/mmu_gather.c in the Linux kernel)
14 #include <asm/tlb.h>
18 static bool tlb_next_batch(struct mmu_gather *tlb)
23 if (tlb->delayed_rmap && tlb->active != &tlb->local)
26 batch = tlb->active;
28 tlb->active = batch->next;
32 if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
39 tlb->batch_count++;
44 tlb->active->next = batch;
45 tlb->active = batch;
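tlb_next_batch() above grows the gather by allocating one more page-sized batch and linking it after the current one; growth is refused while delayed rmaps are pending (line 23) and capped at MAX_GATHER_BATCH_COUNT (line 32) so a huge unmap cannot pin unbounded memory. A rough sketch of the batch descriptor being chained; the real definition lives in include/asm-generic/tlb.h and the field names here are reproduced from memory, so treat this as illustrative:

    /*
     * Illustrative only: one gather batch occupies a single page, most of
     * which is the array of encoded page pointers to free later.
     */
    struct mmu_gather_batch {
            struct mmu_gather_batch *next;             /* singly linked list; head is tlb->local */
            unsigned int            nr;                /* entries currently queued */
            unsigned int            max;               /* capacity; batch is full when nr == max */
            struct encoded_page     *encoded_pages[];  /* page pointers with flag bits in the low bits */
    };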
65 * @tlb: the current mmu_gather
73 void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma)
75 if (!tlb->delayed_rmap)
78 tlb_flush_rmap_batch(&tlb->local, vma);
79 if (tlb->active != &tlb->local)
80 tlb_flush_rmap_batch(tlb->active, vma);
81 tlb->delayed_rmap = 0;
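Because tlb_next_batch() refuses to add a third batch while delayed_rmap is set, tlb_flush_rmaps() only ever has to visit tlb->local and tlb->active. A hedged sketch of what the per-batch helper does for each queued page whose rmap removal was deferred; the flag handling and page_remove_rmap() call below approximate the v6.2-era code and are not a verbatim copy:

    /* Sketch: drop the reverse mapping for every page in one batch that was
     * queued with a "delay rmap" flag encoded in its low pointer bits. */
    static void tlb_flush_rmap_batch(struct mmu_gather_batch *batch,
                                     struct vm_area_struct *vma)
    {
            for (int i = 0; i < batch->nr; i++) {
                    struct encoded_page *enc = batch->encoded_pages[i];

                    if (encoded_page_flags(enc))
                            page_remove_rmap(encoded_page_ptr(enc), vma, false);
            }
    }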
85 static void tlb_batch_pages_flush(struct mmu_gather *tlb)
89 for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
105 tlb->active = &tlb->local;
108 static void tlb_batch_list_free(struct mmu_gather *tlb)
112 for (batch = tlb->local.next; batch; batch = next) {
116 tlb->local.next = NULL;
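Two distinct teardown walks show up here: tlb_batch_pages_flush() releases the gathered pages and rewinds tlb->active to the embedded local batch (line 105), while tlb_batch_list_free() frees the extra batch pages themselves and only runs from tlb_finish_mmu(). A simplified, hedged reconstruction of both loops; the real page-freeing loop works in bounded chunks with cond_resched() in between, which is omitted here:

    /* Sketch: release every gathered page, batch by batch. */
    static void tlb_batch_pages_flush(struct mmu_gather *tlb)
    {
            struct mmu_gather_batch *batch;

            for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
                    free_pages_and_swap_cache(batch->encoded_pages, batch->nr);
                    batch->nr = 0;
            }
            tlb->active = &tlb->local;
    }

    /* Sketch: free the dynamically allocated batch pages (not the user pages). */
    static void tlb_batch_list_free(struct mmu_gather *tlb)
    {
            struct mmu_gather_batch *batch, *next;

            for (batch = tlb->local.next; batch; batch = next) {
                    next = batch->next;
                    free_pages((unsigned long)batch, 0);
            }
            tlb->local.next = NULL;
    }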
119 bool __tlb_remove_page_size(struct mmu_gather *tlb, struct encoded_page *page, int page_size)
123 VM_BUG_ON(!tlb->end);
126 VM_WARN_ON(tlb->page_size != page_size);
129 batch = tlb->active;
136 if (!tlb_next_batch(tlb))
138 batch = tlb->active;
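The return value of __tlb_remove_page_size() is the back-pressure signal: the page is always queued, but true means the active batch filled up and no further batch could be obtained, so the caller must flush before gathering more. A hedged caller-side sketch (the real users in mm/memory.c are more involved; example_zap_one_page() is a made-up name for illustration):

    /* Sketch of the caller contract: queue one page for deferred freeing and,
     * if the gather is saturated, flush TLBs and free everything queued so far. */
    static void example_zap_one_page(struct mmu_gather *tlb, struct page *page)
    {
            /* encode_page(page, 0): no delayed-rmap flag in this sketch */
            if (__tlb_remove_page_size(tlb, encode_page(page, 0), PAGE_SIZE))
                    tlb_flush_mmu(tlb);
    }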
229 static inline void tlb_table_invalidate(struct mmu_gather *tlb)
237 tlb_flush_mmu_tlbonly(tlb);
247 static void tlb_table_flush(struct mmu_gather *tlb)
249 struct mmu_table_batch **batch = &tlb->batch;
252 tlb_table_invalidate(tlb);
258 void tlb_remove_table(struct mmu_gather *tlb, void *table)
260 struct mmu_table_batch **batch = &tlb->batch;
265 tlb_table_invalidate(tlb);
274 tlb_table_flush(tlb);
277 static inline void tlb_table_init(struct mmu_gather *tlb)
279 tlb->batch = NULL;
284 static inline void tlb_table_flush(struct mmu_gather *tlb) { }
285 static inline void tlb_table_init(struct mmu_gather *tlb) { }
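Lines 229-279 belong to the CONFIG_MMU_GATHER_TABLE_FREE path: freed page-table pages go into a separate mmu_table_batch, and tlb_table_invalidate() forces a TLB flush before any table is handed to the (possibly RCU-deferred) free path, so lockless walkers such as fast GUP never dereference a freed table. Lines 284-285 are the empty stubs for kernels built without that option. A hedged reconstruction of the queueing step; tlb_remove_table_one() is the real fallback helper's name, but the body below is simplified:

    /* Sketch: queue one page-table page for deferred freeing. */
    void tlb_remove_table(struct mmu_gather *tlb, void *table)
    {
            struct mmu_table_batch **batch = &tlb->batch;

            if (*batch == NULL) {
                    *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
                    if (*batch == NULL) {
                            /* no memory for batching: invalidate and free this one table now */
                            tlb_table_invalidate(tlb);
                            tlb_remove_table_one(table);
                            return;
                    }
                    (*batch)->nr = 0;
            }

            (*batch)->tables[(*batch)->nr++] = table;
            if ((*batch)->nr == MAX_TABLE_BATCH)
                    tlb_table_flush(tlb);
    }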
289 static void tlb_flush_mmu_free(struct mmu_gather *tlb)
291 tlb_table_flush(tlb);
293 tlb_batch_pages_flush(tlb);
297 void tlb_flush_mmu(struct mmu_gather *tlb)
299 tlb_flush_mmu_tlbonly(tlb);
300 tlb_flush_mmu_free(tlb);
303 static void __tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
306 tlb->mm = mm;
307 tlb->fullmm = fullmm;
310 tlb->need_flush_all = 0;
311 tlb->local.next = NULL;
312 tlb->local.nr = 0;
313 tlb->local.max = ARRAY_SIZE(tlb->__pages);
314 tlb->active = &tlb->local;
315 tlb->batch_count = 0;
317 tlb->delayed_rmap = 0;
319 tlb_table_init(tlb);
321 tlb->page_size = 0;
324 __tlb_reset_range(tlb);
325 inc_tlb_flush_pending(tlb->mm);
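The initialization above touches most of the bookkeeping the rest of the file relies on. A hedged, abridged view of the corresponding struct mmu_gather members; the real structure in include/asm-generic/tlb.h has additional arch- and config-dependent fields, the field ordering and the element type of __pages shown here are approximate:

    /* Abridged sketch, limited to the fields visible in this listing. */
    struct mmu_gather {
            struct mm_struct        *mm;
            struct mmu_table_batch  *batch;         /* page-table pages awaiting free */
            unsigned long           start, end;     /* gathered range to invalidate */
            unsigned int            fullmm : 1;     /* tearing down the whole address space */
            unsigned int            need_flush_all : 1;
            unsigned int            freed_tables : 1;
            unsigned int            delayed_rmap : 1;
            unsigned int            batch_count;    /* extra batches allocated so far */
            struct mmu_gather_batch *active;        /* batch currently being filled */
            struct mmu_gather_batch local;          /* embedded first batch ... */
            struct page             *__pages[MMU_GATHER_BUNDLE]; /* ... whose flexible array lands in this storage, hence local.max = ARRAY_SIZE(__pages) at line 313 */
            unsigned int            page_size;
    };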
330 * @tlb: the mmu_gather structure to initialize
336 void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm)
338 __tlb_gather_mmu(tlb, mm, false);
343 * @tlb: the mmu_gather structure to initialize
352 void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm)
354 __tlb_gather_mmu(tlb, mm, true);
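tlb_gather_mmu() and tlb_gather_mmu_fullmm() are the two public entry points; the only difference is the fullmm flag, which tells the flush logic that the whole address space is going away (exit/exec) so precise per-range tracking can be skipped. The long-standing usage pattern, paraphrased from the comment block in include/asm-generic/tlb.h, looks roughly like this (unmap_region_like() is an illustrative name, not a real kernel function):

    /* Hedged sketch of the gather lifecycle around a teardown. */
    static void unmap_region_like(struct mm_struct *mm)
    {
            struct mmu_gather tlb;

            tlb_gather_mmu(&tlb, mm);       /* inc_tlb_flush_pending(), reset range tracking */
            /* ... walk the page tables: tlb_remove_tlb_entry() for each cleared PTE,
             *     __tlb_remove_page() for each normal page being freed ... */
            tlb_finish_mmu(&tlb);           /* flush TLBs, then free queued pages and tables */
    }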
359 * @tlb: the mmu_gather structure to finish
364 void tlb_finish_mmu(struct mmu_gather *tlb)
378 if (mm_tlb_flush_nested(tlb->mm)) {
387 tlb->fullmm = 1;
388 __tlb_reset_range(tlb);
389 tlb->freed_tables = 1;
392 tlb_flush_mmu(tlb);
395 tlb_batch_list_free(tlb);
397 dec_tlb_flush_pending(tlb->mm);
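The inc_tlb_flush_pending() at gather time (line 325) and the dec_tlb_flush_pending() here bracket the whole operation, and mm_tlb_flush_nested() (line 378) simply checks whether another thread is inside such a bracket on the same mm; if so, tlb_finish_mmu() gives up on precise range tracking and forces a full flush (lines 387-389) rather than risk leaving stale entries behind. A hedged sketch of that accounting; the real helpers in include/linux/mm_types.h add memory-ordering barriers omitted here:

    /* Sketch of the pending-flush counter this logic relies on. */
    static inline void inc_tlb_flush_pending(struct mm_struct *mm)
    {
            atomic_inc(&mm->tlb_flush_pending);
    }

    static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
    {
            /* more than our own pending flush: another thread is batching too */
            return atomic_read(&mm->tlb_flush_pending) > 1;
    }

    static inline void dec_tlb_flush_pending(struct mm_struct *mm)
    {
            atomic_dec(&mm->tlb_flush_pending);
    }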