// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "i915_selftest.h"

#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"

#include "gen8_engine_cs.h"
#include "i915_gem_ww.h"
#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
#include "intel_context.h"
#include "intel_gt.h"
#include "intel_ring.h"

#include "selftests/igt_flush_test.h"
#include "selftests/i915_random.h"

static void vma_set_qw(struct i915_vma *vma, u64 addr, u64 val)
{
	GEM_BUG_ON(addr < i915_vma_offset(vma));
	GEM_BUG_ON(addr >= i915_vma_offset(vma) + i915_vma_size(vma) + sizeof(val));
	memset64(page_mask_bits(vma->obj->mm.mapping) +
		 (addr - i915_vma_offset(vma)), val, 1);
}

static int
pte_tlbinv(struct intel_context *ce,
	   struct i915_vma *va,
	   struct i915_vma *vb,
	   u64 align,
	   void (*tlbinv)(struct i915_address_space *vm, u64 addr, u64 length),
	   u64 length,
	   struct rnd_state *prng)
{
	const unsigned int pat_index =
		i915_gem_get_pat_index(ce->vm->i915, I915_CACHE_NONE);
	struct drm_i915_gem_object *batch;
	struct drm_mm_node vb_node;
	struct i915_request *rq;
	struct i915_vma *vma;
	u64 addr;
	int err;
	u32 *cs;

	batch = i915_gem_object_create_internal(ce->vm->i915, 4096);
	if (IS_ERR(batch))
		return PTR_ERR(batch);

	vma = i915_vma_instance(batch, ce->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto out;

	/* Pin va at random but aligned offset after vma */
	addr = round_up(vma->node.start + vma->node.size, align);
	/* MI_CONDITIONAL_BATCH_BUFFER_END limits address to 48b */
	addr = igt_random_offset(prng, addr, min(ce->vm->total, BIT_ULL(48)),
				 va->size, align);
	err = i915_vma_pin(va, 0, 0, addr | PIN_OFFSET_FIXED | PIN_USER);
	if (err) {
		pr_err("Cannot pin at %llx+%llx\n", addr, va->size);
		goto out;
	}
	GEM_BUG_ON(i915_vma_offset(va) != addr);
	if (vb != va) {
		vb_node = vb->node;
		vb->node = va->node; /* overwrites the _same_ PTE */
	}

	/*
	 * Now choose a random dword within the 1st pinned page.
	 *
	 * SZ_64K pages on dg1 require that the whole PT be marked as
	 * containing 64KiB entries. So we make sure that the vma covers
	 * the whole PT, despite being randomly aligned to 64KiB, and
	 * restrict our sampling to the 2MiB PT within which we know
	 * that we will be using 64KiB pages.
	 */
	if (align == SZ_64K)
		addr = round_up(addr, SZ_2M);
	addr = igt_random_offset(prng, addr, addr + align, 8, 8);

	if (va != vb)
		pr_info("%s(%s): Sampling %llx, with alignment %llx, using PTE size %x (phys %x, sg %x), invalidate:%llx+%llx\n",
			ce->engine->name, va->obj->mm.region->name ?: "smem",
			addr, align, va->resource->page_sizes_gtt,
			va->page_sizes.phys, va->page_sizes.sg,
			addr & -length, length);

	cs = i915_gem_object_pin_map_unlocked(batch, I915_MAP_WC);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto out_va;
	}
	*cs++ = MI_NOOP; /* for later termination */
	/*
	 * Sample the target to see if we spot the updated backing store.
	 * Gen8 VCS compares the immediate value with the bitwise AND of two
	 * consecutive DWORDs pointed to by addr; other gens/engines compare
	 * the value with the single DWORD pointed to by addr. Moreover, we
	 * want to exercise DWORD-sized invalidations. The values below have
	 * been chosen to fulfill all these requirements.
	 */
	*cs++ = MI_CONDITIONAL_BATCH_BUFFER_END | MI_DO_COMPARE | 2;
	*cs++ = 0; /* break if *addr == 0 */
	*cs++ = lower_32_bits(addr);
	*cs++ = upper_32_bits(addr);
	vma_set_qw(va, addr, -1);
	vma_set_qw(vb, addr, 0);

	/* Keep sampling until we get bored */
	*cs++ = MI_BATCH_BUFFER_START | BIT(8) | 1;
	*cs++ = lower_32_bits(i915_vma_offset(vma));
	*cs++ = upper_32_bits(i915_vma_offset(vma));

	i915_gem_object_flush_map(batch);

	rq = i915_request_create(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto out_va;
	}

	err = rq->engine->emit_bb_start(rq, i915_vma_offset(vma), 0, 0);
	if (err) {
		i915_request_add(rq);
		goto out_va;
	}

	i915_request_get(rq);
	i915_request_add(rq);

	/* Short sleep to sanitycheck the batch is spinning before we begin */
	msleep(10);
	if (va == vb) {
		if (!i915_request_completed(rq)) {
			pr_err("%s(%s): Semaphore sanitycheck failed %llx, with alignment %llx, using PTE size %x (phys %x, sg %x)\n",
			       ce->engine->name, va->obj->mm.region->name ?: "smem",
			       addr, align, va->resource->page_sizes_gtt,
			       va->page_sizes.phys, va->page_sizes.sg);
			err = -EIO;
		}
	} else if (!i915_request_completed(rq)) {
		struct i915_vma_resource vb_res = {
			.bi.pages = vb->obj->mm.pages,
			.bi.page_sizes = vb->obj->mm.page_sizes,
			.start = i915_vma_offset(vb),
			.vma_size = i915_vma_size(vb)
		};
		unsigned int pte_flags = 0;

		/* Flip the PTE between A and B */
		if (i915_gem_object_is_lmem(vb->obj))
			pte_flags |= PTE_LM;
		ce->vm->insert_entries(ce->vm, &vb_res, pat_index, pte_flags);

		/* Flush the PTE update to concurrent HW */
		tlbinv(ce->vm, addr & -length, length);

		if (wait_for(i915_request_completed(rq), HZ / 2)) {
			pr_err("%s: Request did not complete; the COND_BBE did not read the updated PTE\n",
			       ce->engine->name);
			err = -EINVAL;
		}
	} else {
		pr_err("Spinner ended unexpectedly\n");
		err = -EIO;
	}
	i915_request_put(rq);

	/* Replace the leading MI_NOOP with a batch end to terminate the spinner */
	cs = page_mask_bits(batch->mm.mapping);
	*cs = MI_BATCH_BUFFER_END;
	wmb();

out_va:
	if (vb != va)
		vb->node = vb_node;
	i915_vma_unpin(va);
	if (i915_vma_unbind_unlocked(va))
		err = -EIO;
out:
	i915_gem_object_put(batch);
	return err;
}

static struct drm_i915_gem_object *create_lmem(struct intel_gt *gt)
{
	struct intel_memory_region *mr = gt->i915->mm.regions[INTEL_REGION_LMEM_0];
	resource_size_t size = SZ_1G;

	/*
	 * Allocating the largest possible page size allows us to test all
	 * types of pages. To succeed with both allocations, especially in
	 * the case of Small BAR, try to allocate no more than a quarter of
	 * the mappable memory.
	 */
	if (mr && size > mr->io_size / 4)
		size = mr->io_size / 4;

	return i915_gem_object_create_lmem(gt->i915, size, I915_BO_ALLOC_CONTIGUOUS);
}

static struct drm_i915_gem_object *create_smem(struct intel_gt *gt)
{
	/*
	 * SZ_64K pages require covering the whole 2M PT (gen8 to tgl/dg1).
	 * While that does not require the whole 2M block to be contiguous,
	 * it is easier to make it so, since we need that for SZ_2M pages.
	 * Since we randomly offset the start of the vma, we need a 4M object
	 * so that there is a 2M range within it that is suitable for SZ_64K
	 * PTEs.
	 */
	return i915_gem_object_create_internal(gt->i915, SZ_4M);
}

static int
mem_tlbinv(struct intel_gt *gt,
	   struct drm_i915_gem_object *(*create_fn)(struct intel_gt *),
	   void (*tlbinv)(struct i915_address_space *vm, u64 addr, u64 length))
{
	unsigned int ppgtt_size = RUNTIME_INFO(gt->i915)->ppgtt_size;
	struct intel_engine_cs *engine;
	struct drm_i915_gem_object *A, *B;
	struct i915_ppgtt *ppgtt;
	struct i915_vma *va, *vb;
	enum intel_engine_id id;
	I915_RND_STATE(prng);
	void *vaddr;
	int err;

	/*
	 * Check that the TLB invalidate is able to revoke an active
	 * page.
	 * We load a page into a spinning COND_BBE loop and then remap that
	 * page to a new physical address. The old address (and so the loop
	 * keeps spinning) is retained in the TLB cache until we issue an
	 * invalidate.
	 */

	A = create_fn(gt);
	if (IS_ERR(A))
		return PTR_ERR(A);

	vaddr = i915_gem_object_pin_map_unlocked(A, I915_MAP_WC);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_a;
	}

	B = create_fn(gt);
	if (IS_ERR(B)) {
		err = PTR_ERR(B);
		goto out_a;
	}

	vaddr = i915_gem_object_pin_map_unlocked(B, I915_MAP_WC);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_b;
	}

	GEM_BUG_ON(A->base.size != B->base.size);
	if ((A->mm.page_sizes.phys | B->mm.page_sizes.phys) & (A->base.size - 1))
		pr_warn("Failed to allocate contiguous pages for size %zx\n",
			A->base.size);

	ppgtt = i915_ppgtt_create(gt, 0);
	if (IS_ERR(ppgtt)) {
		err = PTR_ERR(ppgtt);
		goto out_b;
	}

	va = i915_vma_instance(A, &ppgtt->vm, NULL);
	if (IS_ERR(va)) {
		err = PTR_ERR(va);
		goto out_vm;
	}

	vb = i915_vma_instance(B, &ppgtt->vm, NULL);
	if (IS_ERR(vb)) {
		err = PTR_ERR(vb);
		goto out_vm;
	}

	err = 0;
	for_each_engine(engine, gt, id) {
		struct i915_gem_ww_ctx ww;
		struct intel_context *ce;
		int bit;

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			break;
		}

		i915_vm_put(ce->vm);
		ce->vm = i915_vm_get(&ppgtt->vm);

		for_i915_gem_ww(&ww, err, true)
			err = intel_context_pin_ww(ce, &ww);
		if (err)
			goto err_put;

		for_each_set_bit(bit,
				 (unsigned long *)&RUNTIME_INFO(gt->i915)->page_sizes,
				 BITS_PER_TYPE(RUNTIME_INFO(gt->i915)->page_sizes)) {
			unsigned int len;

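			/*
			 * Skip page sizes smaller than the minimum GTT
			 * alignment required for this object in this vm.
			 */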
			if (BIT_ULL(bit) < i915_vm_obj_min_alignment(va->vm, va->obj))
				continue;

			/* sanitycheck the semaphore wake up */
			err = pte_tlbinv(ce, va, va,
					 BIT_ULL(bit),
					 NULL, SZ_4K,
					 &prng);
			if (err)
				goto err_unpin;

			for (len = 2; len <= ppgtt_size; len = min(2 * len, ppgtt_size)) {
				err = pte_tlbinv(ce, va, vb,
						 BIT_ULL(bit),
						 tlbinv,
						 BIT_ULL(len),
						 &prng);
				if (err)
					goto err_unpin;
				if (len == ppgtt_size)
					break;
			}
		}
err_unpin:
		intel_context_unpin(ce);
err_put:
		intel_context_put(ce);
		if (err)
			break;
	}

	if (igt_flush_test(gt->i915))
		err = -EIO;

out_vm:
	i915_vm_put(&ppgtt->vm);
out_b:
	i915_gem_object_put(B);
out_a:
	i915_gem_object_put(A);
	return err;
}

static void tlbinv_full(struct i915_address_space *vm, u64 addr, u64 length)
{
	intel_gt_invalidate_tlb_full(vm->gt, intel_gt_tlb_seqno(vm->gt) | 1);
}

static int invalidate_full(void *arg)
{
	struct intel_gt *gt = arg;
	int err;

	if (GRAPHICS_VER(gt->i915) < 8)
		return 0; /* TLB invalidate not implemented */

	err = mem_tlbinv(gt, create_smem, tlbinv_full);
	if (err == 0)
		err = mem_tlbinv(gt, create_lmem, tlbinv_full);
	if (err == -ENODEV || err == -ENXIO)
		err = 0;

	return err;
}

int intel_tlb_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(invalidate_full),
	};
	struct intel_gt *gt;
	unsigned int i;

	for_each_gt(gt, i915, i) {
		int err;

		if (intel_gt_is_wedged(gt))
			continue;

		err = intel_gt_live_subtests(tests, gt);
		if (err)
			return err;
	}

	return 0;
}