Lines matching defs:map (hits for the map identifier in Mesa's Intel Vulkan driver, ANV)
445 /* Add an additional smaller vram mappable heap if we can't map all the
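The comment at source line 445 concerns small-BAR systems, where only part of VRAM is CPU-mappable. A minimal sketch of the heap split it describes; the struct, field names, and helper below are illustrative assumptions, not the driver's actual code:

    #include <stdbool.h>
    #include <stdint.h>

    struct heap { uint64_t size; bool cpu_visible; };

    /* If only mappable_size bytes of VRAM are CPU-visible, advertise the
     * mappable slice as its own, smaller heap alongside the non-mappable
     * bulk; otherwise one heap covers everything. Returns the heap count. */
    static int
    add_vram_heaps(uint64_t vram_size, uint64_t mappable_size,
                   struct heap heaps[2])
    {
        if (mappable_size < vram_size) {
            heaps[0] = (struct heap){ vram_size - mappable_size, false };
            heaps[1] = (struct heap){ mappable_size, true };
            return 2;
        }
        heaps[0] = (struct heap){ vram_size, true };
        return 1;
    }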
2150 /* It's a bit hard to exactly map our implementation to the limits
2909 memcpy(state.map, p, size);
2957 .start = device->trivial_batch_bo->map,
2958 .next = device->trivial_batch_bo->map,
2959 .end = device->trivial_batch_bo->map + 4096,
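Source lines 2957-2959 point a batch's start/next/end straight at a BO's CPU mapping, so command dwords are written through the map and bounds-checked against end. A minimal sketch of that pattern, assuming a 4096-byte mapping; the struct and emit helper are illustrative, not ANV's actual batch API:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    struct batch { void *start, *next, *end; };

    /* Append one dword to the batch, writing directly through the BO map. */
    static void
    emit_dword(struct batch *b, uint32_t dw)
    {
        assert((char *)b->next + sizeof(dw) <= (char *)b->end);
        memcpy(b->next, &dw, sizeof(dw));
        b->next = (char *)b->next + sizeof(dw);
    }

    /* Usage, given a 4096-byte CPU-mapped BO:
     *   struct batch b = { bo_map, bo_map, (char *)bo_map + 4096 };
     *   emit_dword(&b, 0x05000000);  // MI_BATCH_BUFFER_END on Intel GPUs
     */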
2982 .map = bo->map,
3021 .map = (*bo)->bo->map,
3050 buf->base.map = buf->state.map;
3408 intel_debug_write_identifiers(device->workaround_bo->map,
3414 intel_debug_get_identifier_block(device->workaround_bo->map,
3451 if (device->cps_states.map == NULL)
3465 isl_null_fill_state(&device->isl_dev, device->null_surface_state.map,
3812 mem->map = NULL;
4125 if (mem->map)
4185 if (mem->map != NULL) {
4196 /* GEM will fail to map if the offset isn't 4k-aligned. Round down. */
4205 /* Let's map whole pages */
4208 void *map;
4210 map_size, gem_flags, &map);
4214 mem->map = map;
4217 *ppData = mem->map + mem->map_delta;
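The comments at source lines 4196 and 4205 describe the core of the map path here: the kernel mmap interface wants 4k-aligned offsets, so the driver rounds the requested offset down, maps whole pages, and keeps the difference in map_delta so the pointer handed back at line 4217 still lands exactly at the requested offset. A minimal sketch of that arithmetic; the struct, the os_map callback, and all names are illustrative:

    #include <stdint.h>

    #define PAGE_MASK 4095ull

    struct mapping {
        void    *map;        /* page-aligned CPU mapping of the BO */
        uint64_t map_delta;  /* requested offset minus aligned offset */
    };

    static void *
    map_at_offset(struct mapping *m,
                  void *(*os_map)(uint64_t offset, uint64_t length),
                  uint64_t offset, uint64_t size)
    {
        /* Round the offset down to a 4k boundary... */
        uint64_t map_offset = offset & ~PAGE_MASK;
        /* ...and extend the length so whole pages are mapped. */
        uint64_t map_size = (offset + size - map_offset + PAGE_MASK)
                            & ~PAGE_MASK;

        m->map       = os_map(map_offset, map_size);
        m->map_delta = offset - map_offset;

        /* What the caller sees (compare *ppData at line 4217). */
        return (char *)m->map + m->map_delta;
    }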
4232 anv_device_unmap_bo(device, mem->bo, mem->map, mem->map_size);
4234 mem->map = NULL;
4261 intel_clflush_range(mem->map + map_offset,
4288 intel_invalidate_range(mem->map + map_offset,
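Source lines 4261 and 4288 are the cache maintenance for non-coherent memory: intel_clflush_range() pushes CPU writes out before the GPU reads them (the flush path), and intel_invalidate_range() discards stale CPU cachelines before reading GPU writes (the invalidate path). A minimal, x86-only sketch of what such a flush does, using GCC builtins; the cacheline size and the offset arithmetic in the usage note are assumptions:

    #include <stddef.h>
    #include <stdint.h>

    #define CACHELINE 64  /* assumed cacheline size */

    /* Flush every cacheline overlapping [start, start + size). */
    static void
    clflush_range(void *start, size_t size)
    {
        char *p   = (char *)((uintptr_t)start & ~(uintptr_t)(CACHELINE - 1));
        char *end = (char *)start + size;
        for (; p < end; p += CACHELINE)
            __builtin_ia32_clflush(p);
        __builtin_ia32_mfence();  /* order the flushes */
    }

    /* Flushing one mapped range then looks roughly like
     *   clflush_range(mem->map + map_offset, range_size);
     * matching the mem->map + map_offset expressions at 4261/4288. */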
4371 *(uint64_t *)event->state.map = VK_EVENT_RESET;
4404 return *(uint64_t *)event->state.map;
4413 *(uint64_t *)event->state.map = VK_EVENT_SET;
4424 *(uint64_t *)event->state.map = VK_EVENT_RESET;
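Source lines 4371-4424 show the whole VkEvent representation: one 64-bit word in CPU-mapped, GPU-visible memory, holding the VkResult codes VK_EVENT_SET and VK_EVENT_RESET (3 and 4). Host-side set, reset, and status then reduce to a store or load through the mapping; a minimal sketch, with the struct and helpers as illustrative stand-ins for the driver's entrypoints:

    #include <stdint.h>

    enum { EVENT_SET = 3, EVENT_RESET = 4 };  /* VkResult VK_EVENT_* values */

    struct event { volatile uint64_t *map; };  /* word in a mapped BO */

    static void     set_event(struct event *e)    { *e->map = EVENT_SET; }
    static void     reset_event(struct event *e)  { *e->map = EVENT_RESET; }
    static uint64_t event_status(struct event *e) { return *e->map; }

    /* The GPU can write the same word from the command streamer (e.g. a
     * post-sync PIPE_CONTROL or MI_STORE_DATA_IMM), which is why a plain
     * mapped word suffices for setting and polling events. */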
4592 isl_buffer_fill_state(&device->isl_dev, state.map,
4613 if (sampler->bindless_state.map) {
4618 if (sampler->custom_border_color.map) {
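Source lines 4613 and 4618 use a null map as the "never allocated" sentinel when tearing a sampler down: each pool-backed state is freed only if it was actually allocated. A minimal sketch of that guard; pool_free() is a hypothetical stand-in for the driver's state-pool free:

    #include <stddef.h>

    struct state { void *map; /* plus pool offset/size in the real thing */ };

    static void
    pool_free(struct state *s)  /* stand-in for the real pool free */
    {
        s->map = NULL;
    }

    static void
    destroy_sampler_states(struct state *bindless_state,
                           struct state *custom_border_color)
    {
        if (bindless_state->map)
            pool_free(bindless_state);
        if (custom_border_color->map)
            pool_free(custom_border_color);
    }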