Lines matching refs:mem (references to the mem symbol; the matches below are from Mesa's src/intel/tools/aub_mem.c)

55 add_gtt_bo_map(struct aub_mem *mem, struct intel_batch_decode_bo bo, bool ppgtt, bool unmap_after_use)
62 list_add(&m->link, &mem->maps);
66 aub_mem_clear_bo_maps(struct aub_mem *mem)
68 list_for_each_entry_safe(struct bo_map, i, &mem->maps, link) {
105 ensure_ggtt_entry(struct aub_mem *mem, uint64_t virt_addr)
107 struct rb_node *node = rb_tree_search_sloppy(&mem->ggtt, &virt_addr,
113 rb_tree_insert_at(&mem->ggtt, node, &new_entry->node, cmp < 0);
121 search_ggtt_entry(struct aub_mem *mem, uint64_t virt_addr)
125 struct rb_node *node = rb_tree_search(&mem->ggtt, &virt_addr, cmp_ggtt_entry);
136 struct phys_mem *mem = rb_node_data(struct phys_mem, node, node);
137 return cmp_uint64(mem->phys_addr, *(uint64_t *)addr);
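
The rb_node_data() call in the comparator above is the container-of idiom: the rb_node is embedded inside the entry struct, and the macro steps back by the member's offset to recover the containing object. A minimal self-contained sketch of the same idiom (node_data and phys_entry are illustrative names, not Mesa's):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct rb_node { struct rb_node *left, *right; };

/* Same shape as Mesa's rb_node_data(): step back from the embedded
 * node to the struct that contains it. */
#define node_data(type, node, field) \
   ((type *)(((char *)(node)) - offsetof(type, field)))

struct phys_entry {
   uint64_t phys_addr;
   struct rb_node node;   /* embedded: the tree links live inside the entry */
};

int main(void)
{
   struct phys_entry e = { .phys_addr = 0x1000 };
   struct rb_node *n = &e.node;   /* what a tree search hands back */

   assert(node_data(struct phys_entry, n, node) == &e);
   assert(node_data(struct phys_entry, n, node)->phys_addr == 0x1000);
   return 0;
}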
158 ensure_phys_mem(struct aub_mem *mem, uint64_t phys_addr)
160 struct rb_node *node = rb_tree_search_sloppy(&mem->mem, &phys_addr, cmp_phys_mem);
165 new_mem->fd_offset = mem->mem_fd_len;
167 ASSERTED int ftruncate_res = ftruncate(mem->mem_fd, mem->mem_fd_len += 4096);
171 mem->mem_fd, new_mem->fd_offset);
174 rb_tree_insert_at(&mem->mem, node, &new_mem->node, cmp < 0);
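
ensure_phys_mem() (lines 158-174 above) backs every guest-physical page with a fresh 4 KiB slice of a single anonymous file: the ftruncate() at line 167 grows the file by one page, and the slice is mapped MAP_SHARED at that offset. The payoff is that any number of later mappings of the same fd_offset alias the same storage. A standalone POSIX sketch of that trick, with Linux's memfd_create() standing in for Mesa's os_create_anonymous_file():

#define _GNU_SOURCE
#include <assert.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
   /* memfd_create() is a stand-in for os_create_anonymous_file(). */
   int fd = memfd_create("phys memory", 0);
   off_t fd_len = 0;

   off_t slice = fd_len;             /* plays the role of new_mem->fd_offset */
   ftruncate(fd, fd_len += 4096);    /* grow the backing file by one page */

   /* Two independent mappings of the same file slice... */
   char *a = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, slice);
   char *b = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, slice);

   /* ...alias the same storage: a write through one mapping is visible
    * through the other, which is what lets GGTT/PPGTT views share pages. */
   memcpy(a, "aub", 4);
   assert(memcmp(b, "aub", 4) == 0);

   munmap(a, 4096);
   munmap(b, 4096);
   close(fd);
   return 0;
}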
182 search_phys_mem(struct aub_mem *mem, uint64_t phys_addr)
186 struct rb_node *node = rb_tree_search(&mem->mem, &phys_addr, cmp_phys_mem);
198 struct aub_mem *mem = _mem;
204 add_gtt_bo_map(mem, bo, false, false);
211 struct aub_mem *mem = _mem;
218 struct ggtt_entry *pt = ensure_ggtt_entry(mem, virt_addr);
227 struct aub_mem *mem = _mem;
230 struct phys_mem *pmem = ensure_phys_mem(mem, page);
244 struct aub_mem *mem = _mem;
247 struct ggtt_entry *entry = search_ggtt_entry(mem, page);
255 aub_mem_phys_write(mem, phys_page + offset, data, size_this_page);
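
The write paths around lines 227-255 chop a write spanning [address, address + size) at 4 KiB boundaries and push each chunk through its own page entry (aub_mem_ggtt_write translating each page before delegating to aub_mem_phys_write at line 255). A self-contained sketch of that splitting loop; write_chunk() is an illustrative stand-in for the per-page translate-and-store:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ull

/* Illustrative stand-in for "translate this page, then store into it". */
static void write_chunk(uint64_t page, uint64_t offset, uint32_t size)
{
   printf("page 0x%llx  offset 0x%llx  size 0x%x\n",
          (unsigned long long)page, (unsigned long long)offset, size);
}

int main(void)
{
   uint64_t address = 0x1ff0;            /* deliberately straddles a page */
   uint32_t size = 0x30, left = size;

   for (uint64_t page = address & ~(PAGE_SIZE - 1);
        page < address + size; page += PAGE_SIZE) {
      uint64_t offset = (address > page ? address : page) - page;
      uint32_t chunk = left < PAGE_SIZE - offset ? left
                                                 : (uint32_t)(PAGE_SIZE - offset);
      write_chunk(page, offset, chunk);  /* 0x10 bytes, then 0x20 bytes */
      left -= chunk;
   }
   return 0;
}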
263 struct aub_mem *mem = _mem;
266 list_for_each_entry(struct bo_map, i, &mem->maps, link)
273 (struct ggtt_entry *)rb_tree_search_sloppy(&mem->ggtt, &address,
295 struct phys_mem *phys_mem = search_phys_mem(mem, phys_addr);
302 MAP_SHARED | MAP_FIXED, mem->mem_fd, phys_mem->fd_offset);
306 add_gtt_bo_map(mem, bo, false, true);
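
aub_mem_get_ggtt_bo() (lines 263-306) builds a contiguous CPU view of a scattered GPU range: it reserves an address range, then overlays each page's file slice onto it with MAP_SHARED | MAP_FIXED (line 302). A standalone POSIX sketch of that stitching, again with memfd_create() as a stand-in for the backing fd:

#define _GNU_SOURCE
#include <assert.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
   int fd = memfd_create("phys memory", 0);
   ftruncate(fd, 3 * 4096);

   /* Two "physical" pages that are NOT adjacent in the backing file. */
   pwrite(fd, "first", 6, 0 * 4096);
   pwrite(fd, "second", 7, 2 * 4096);

   /* Reserve a contiguous two-page window... */
   char *view = mmap(NULL, 2 * 4096, PROT_READ,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

   /* ...then pin each file slice into place over it with MAP_FIXED,
    * giving one linear view of scattered pages. */
   mmap(view,        4096, PROT_READ, MAP_SHARED | MAP_FIXED, fd, 0 * 4096);
   mmap(view + 4096, 4096, PROT_READ, MAP_SHARED | MAP_FIXED, fd, 2 * 4096);

   assert(strcmp(view, "first") == 0);
   assert(strcmp(view + 4096, "second") == 0);

   munmap(view, 2 * 4096);
   close(fd);
   return 0;
}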
312 ppgtt_walk(struct aub_mem *mem, uint64_t pml4, uint64_t address)
317 struct phys_mem *table = search_phys_mem(mem, addr);
327 return search_phys_mem(mem, addr);
331 ppgtt_mapped(struct aub_mem *mem, uint64_t pml4, uint64_t address)
333 return ppgtt_walk(mem, pml4, address) != NULL;
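
ppgtt_walk() (lines 312-327) resolves a virtual address through four page-table levels, looking each table up in the phys-mem tree and descending until the final data page. A self-contained sketch of an x86-style 4-level walk of that kind over a toy flat "physical memory"; the 9-bits-per-level indexing, the 0x1ff mask, the bit-0 present check, and all names here are assumptions for illustration, not quoted from the file:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PRESENT   1ull
#define ADDR_MASK (~0xfffull)

/* Toy "physical memory": one flat allocation addressed by byte offset. */
static uint64_t *phys_page(char *pool, uint64_t phys_addr)
{
   return (uint64_t *)(pool + (phys_addr & ADDR_MASK));
}

/* 4 levels, 9 address bits per level, bit 0 of each entry = present. */
static uint64_t walk(char *pool, uint64_t pml4, uint64_t address)
{
   uint64_t addr = pml4, shift = 39;
   for (int level = 4; level > 0; level--) {
      uint64_t entry = phys_page(pool, addr)[(address >> shift) & 0x1ff];
      if (!(entry & PRESENT))
         return UINT64_MAX;              /* unmapped */
      addr = entry & ADDR_MASK;
      shift -= 9;
   }
   return addr | (address & 0xfff);      /* page base + page offset */
}

int main(void)
{
   uint64_t va = 0x7f1234567008ull;
   char *pool = calloc(5, 4096);   /* tables at 0x0000..0x3000, data at 0x4000 */

   /* Wire up one translation, level by level. */
   uint64_t addr = 0, shift = 39;
   for (int level = 4; level > 0; level--) {
      uint64_t next = addr + 4096;
      phys_page(pool, addr)[(va >> shift) & 0x1ff] = next | PRESENT;
      addr = next;
      shift -= 9;
   }

   printf("0x%" PRIx64 " -> 0x%" PRIx64 "\n", va, walk(pool, 0, va));
   free(pool);
   return 0;
}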
339 struct aub_mem *mem = _mem;
342 list_for_each_entry(struct bo_map, i, &mem->maps, link)
348 if (!ppgtt_mapped(mem, mem->pml4, address))
355 while (ppgtt_mapped(mem, mem->pml4, end))
364 struct phys_mem *phys_mem = ppgtt_walk(mem, mem->pml4, page);
367 MAP_SHARED | MAP_FIXED, mem->mem_fd, phys_mem->fd_offset);
371 add_gtt_bo_map(mem, bo, true, true);
377 aub_mem_init(struct aub_mem *mem)
379 memset(mem, 0, sizeof(*mem));
381 list_inithead(&mem->maps);
383 mem->mem_fd = os_create_anonymous_file(0, "phys memory");
385 return mem->mem_fd != -1;
389 aub_mem_fini(struct aub_mem *mem)
391 if (mem->mem_fd == -1)
394 aub_mem_clear_bo_maps(mem);
397 rb_tree_foreach_safe(struct ggtt_entry, entry, &mem->ggtt, node) {
398 rb_tree_remove(&mem->ggtt, &entry->node);
401 rb_tree_foreach_safe(struct phys_mem, entry, &mem->mem, node) {
402 rb_tree_remove(&mem->mem, &entry->node);
406 close(mem->mem_fd);
407 mem->mem_fd = -1;
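
Taken together, lines 377-407 define the object's lifecycle: aub_mem_init() zero-fills the struct, initializes the map list, and opens the backing fd (reporting failure when the fd is -1), while aub_mem_fini() unwinds the bo maps, both rb-trees, and the fd. A hedged usage sketch, assuming Mesa's src/intel/tools/aub_mem.h; decode_one_aub() is a hypothetical caller, not from the file:

/* Hypothetical caller; assumes Mesa's src/intel/tools/aub_mem.h. */
#include <stdbool.h>

#include "aub_mem.h"

bool decode_one_aub(void)
{
   struct aub_mem mem;

   if (!aub_mem_init(&mem))       /* zero-fills *mem, opens the backing fd */
      return false;

   /* ... replay AUB writes through the aub_mem_*_write() handlers,
    * then decode batches via the aub_mem_get_*() getters ... */

   aub_mem_clear_bo_maps(&mem);   /* optional between batches; aub_mem_fini()
                                   * also does this (line 394) */
   aub_mem_fini(&mem);            /* frees both trees, closes the fd */
   return true;
}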
411 aub_mem_get_phys_addr_data(struct aub_mem *mem, uint64_t phys_addr)
413 struct phys_mem *page = search_phys_mem(mem, phys_addr);
420 aub_mem_get_ppgtt_addr_data(struct aub_mem *mem, uint64_t virt_addr)
422 struct phys_mem *page = ppgtt_walk(mem, mem->pml4, virt_addr);
429 aub_mem_get_ppgtt_addr_aub_data(struct aub_mem *mem, uint64_t virt_addr)
431 struct phys_mem *page = ppgtt_walk(mem, mem->pml4, virt_addr);
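
The three getters at lines 411-431 hand back an intel_batch_decode_bo describing the page containing the requested address. A hedged caller-side fragment, assuming (per Mesa's intel_decoder.h) that the struct carries addr/size/map fields and that a failed lookup yields a zeroed bo, so .map doubles as the "found" flag:

/* Illustrative fragment, not from aub_mem.c. */
struct intel_batch_decode_bo bo =
   aub_mem_get_ppgtt_addr_data(mem, virt_addr);
if (bo.map == NULL)
   return;                        /* address not mapped in this PPGTT */
const uint32_t *dw = bo.map;      /* window of bo.size bytes at bo.addr */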