Lines Matching defs:mapping
199 void __flush_dcache_page(struct address_space *mapping, struct page *page)
202 * Writeback any data associated with the kernel mapping of this
204 * coherent with the kernel's mapping.
232 if (mapping && cache_is_vipt_aliasing())
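The matched lines 199-232 are the signature and body of __flush_dcache_page(): the comment covers the write-back of the kernel alias, and line 232 adds an extra flush of the userspace colour when the cache is aliasing VIPT. A minimal sketch of how those pieces fit together follows; PageHighMem(), __cpuc_flush_dcache_area(), page_address() and the flush_pfn_alias() helper are not in the listing and are assumed from the surrounding arch/arm/mm/flush.c-style code.

    void __flush_dcache_page(struct address_space *mapping, struct page *page)
    {
        /*
         * Writeback any data associated with the kernel mapping of this
         * page.  This ensures that data in the physical page is mutually
         * coherent with the kernel's mapping.
         */
        if (!PageHighMem(page))
            __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);

        /*
         * A page-cache page on an aliasing VIPT cache may also be dirty
         * at its userspace colour, so flush one alias of it as well.
         */
        if (mapping && cache_is_vipt_aliasing())
            flush_pfn_alias(page_to_pfn(page),
                            page->index << PAGE_SHIFT);
    }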
237 static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
247 * - aliasing VIPT: we only need to find one mapping of this page.
251 flush_dcache_mmap_lock(mapping);
252 vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
265 flush_dcache_mmap_unlock(mapping);
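Lines 237-265 are __flush_dcache_aliases(): it takes the mmap lock on the address_space, walks every VMA covering this page offset via the i_mmap interval tree, and flushes the user alias for each VMA in the current mm. The per-VMA filtering and the flush_cache_page() call are not shown in the listing; the sketch below assumes them from the usual shape of this helper.

    static void __flush_dcache_aliases(struct address_space *mapping,
                                       struct page *page)
    {
        struct mm_struct *mm = current->active_mm;
        struct vm_area_struct *mpnt;
        pgoff_t pgoff = page->index;

        /*
         * - VIVT cache: write back and invalidate every user alias in the
         *   current VM view of this page.
         * - aliasing VIPT: finding one mapping of this page is enough.
         */
        flush_dcache_mmap_lock(mapping);
        vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
            unsigned long offset;

            /* Only aliases visible in the current mm can be flushed here. */
            if (mpnt->vm_mm != mm)
                continue;
            if (!(mpnt->vm_flags & VM_MAYSHARE))
                continue;
            offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
            flush_cache_page(mpnt, mpnt->vm_start + offset,
                             page_to_pfn(page));
        }
        flush_dcache_mmap_unlock(mapping);
    }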
273 struct address_space *mapping;
284 mapping = page_mapping_file(page);
286 mapping = NULL;
289 __flush_dcache_page(mapping, page);
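Lines 273-289 sit in __sync_icache_dcache(): the file mapping is looked up only when the cache is aliasing VIPT (otherwise mapping stays NULL), and __flush_dcache_page() runs the first time the page is seen. A rough sketch; the pte-to-page plumbing, the PG_dcache_clean test and the icache flush are assumptions, not lines from the listing.

    void __sync_icache_dcache(pte_t pteval)
    {
        struct address_space *mapping;
        unsigned long pfn = pte_pfn(pteval);
        struct page *page;

        if (!pfn_valid(pfn))
            return;

        page = pfn_to_page(pfn);
        /* Only aliasing VIPT caches care about the file mapping here. */
        if (cache_is_vipt_aliasing())
            mapping = page_mapping_file(page);
        else
            mapping = NULL;

        /* Flush once: only if PG_dcache_clean was not already set. */
        if (!test_and_set_bit(PG_dcache_clean, &page->flags))
            __flush_dcache_page(mapping, page);

        /* Executable mappings additionally need the icache invalidated. */
        if (pte_exec(pteval))
            __flush_icache_all();
    }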
297 * Ensure cache coherency between kernel mapping and userspace mapping
317 struct address_space *mapping;
332 mapping = page_mapping_file(page);
335 mapping && !page_mapcount(page))
338 __flush_dcache_page(mapping, page);
339 if (mapping && cache_is_vivt())
340 __flush_dcache_aliases(mapping, page);
341 else if (mapping)
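Lines 297-341 are flush_dcache_page(), the entry point that keeps the kernel and userspace views of a page-cache page coherent. The matched lines spell out its decision: when the page has a file mapping but no user mappings yet, the flush is deferred; otherwise the kernel alias is flushed, then on VIVT caches every user alias, or failing that the icache. A sketch of that control flow, with the ZERO_PAGE short-circuit and the PG_dcache_clean bookkeeping assumed from context.

    void flush_dcache_page(struct page *page)
    {
        struct address_space *mapping;

        /* The zero page is never written to, so it is never dirty. */
        if (page == ZERO_PAGE(0))
            return;

        mapping = page_mapping_file(page);

        if (!cache_ops_need_broadcast() &&
            mapping && !page_mapcount(page)) {
            /* No user mappings yet: defer the flush by clearing the clean flag. */
            clear_bit(PG_dcache_clean, &page->flags);
        } else {
            __flush_dcache_page(mapping, page);
            if (mapping && cache_is_vivt())
                __flush_dcache_aliases(mapping, page);
            else if (mapping)
                __flush_icache_all();
            set_bit(PG_dcache_clean, &page->flags);
        }
    }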
349 * Ensure cache coherency for the kernel mapping of this page. We can
360 struct address_space *mapping;
362 mapping = page_mapping_file(page);
364 if (!mapping || mapping_mapped(mapping)) {
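Lines 349-364 are from flush_kernel_dcache_page(): the kernel alias needs an explicit flush only when the page has no file mapping (e.g. it is anonymous) or its mapping is currently mmap'd into userspace; a page that lives only in the page cache can be skipped. A hedged sketch, assuming the aliasing-cache guard and the __cpuc_flush_dcache_area() call from the same file.

    void flush_kernel_dcache_page(struct page *page)
    {
        /* Non-aliasing caches keep the kernel alias coherent by themselves. */
        if (cache_is_vivt() || cache_is_vipt_aliasing()) {
            struct address_space *mapping;

            mapping = page_mapping_file(page);

            /* Skip pages that exist only in the page cache and are unmapped. */
            if (!mapping || mapping_mapped(mapping))
                __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
        }
    }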
399 * Write back and invalidate userspace mapping.
414 * Invalidate kernel mapping. No data should be contained
415 * in this mapping of the page. FIXME: this is overkill
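The final comments (lines 399 and 414-415) are from __flush_anon_page(), used so that get_user_pages() callers can safely touch anonymous pages: the userspace mapping is written back and invalidated first, then the kernel mapping is invalidated (the FIXME notes that a full write-back-and-invalidate is used where a plain invalidate would do). A minimal sketch, assuming flush_cache_page()/flush_pfn_alias() for the user side as elsewhere in this file.

    void __flush_anon_page(struct vm_area_struct *vma, struct page *page,
                           unsigned long vmaddr)
    {
        unsigned long pfn = page_to_pfn(page);

        /* VIPT non-aliasing caches need no work here. */
        if (cache_is_vipt_nonaliasing())
            return;

        /*
         * Write back and invalidate the userspace mapping.
         */
        if (cache_is_vivt())
            flush_cache_page(vma, vmaddr, pfn);
        else
            flush_pfn_alias(pfn, vmaddr);

        /*
         * Invalidate the kernel mapping.  No data should be contained in
         * this mapping of the page (overkill: this call is actually a
         * write back and invalidate).
         */
        __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
    }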