Lines Matching refs:fault

66 			u8  fault;
68 } **fault;
163 * page fault) and maybe some other commands.
382 /* Issue fault replay for GPU to retry accesses that faulted previously. */
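A minimal sketch of the replay call, assuming replay is a no-payload method issued against the client's VMM object via nvif_object_mthd(); the GP100_VMM_VN_FAULT_REPLAY constant and the object path are assumptions modelled on the gp100 VMM uAPI:

static void
nouveau_svm_fault_replay(struct nouveau_svm *svm)
{
        SVM_DBG(svm, "replay");
        /* No payload: the GPU retries every pending replayable fault.
         * GP100_VMM_VN_FAULT_REPLAY is an assumed method constant. */
        WARN_ON(nvif_object_mthd(&svm->drm->client.vmm.vmm.object,
                                 GP100_VMM_VN_FAULT_REPLAY, NULL, 0));
}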
393 /* Cancel a replayable fault that could not be handled.
395 * Cancelling the fault will trigger recovery to reset the engine
415 struct nouveau_svm_fault *fault)
417 nouveau_svm_fault_cancel(svm, fault->inst,
418 fault->hub,
419 fault->gpc,
420 fault->client);
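The wrapper above forwards the fault's identifying fields; a plausible shape for the underlying cancel, assuming a gp100-style VMM cancel method whose arguments carry inst/hub/gpc/client (struct name, field layout, and method constant are all assumptions):

static void
nouveau_svm_fault_cancel(struct nouveau_svm *svm,
                         u64 inst, u8 hub, u8 gpc, u8 client)
{
        /* Assumed args layout: identifies the faulting unit so the
         * engine can be reset and the offending channel killed. */
        struct gp100_vmm_fault_cancel_v0 args = {
                .inst = inst,
                .hub = hub,
                .gpc = gpc,
                .client = client,
        };

        WARN_ON(nvif_object_mthd(&svm->drm->client.vmm.vmm.object,
                                 GP100_VMM_VN_FAULT_CANCEL,
                                 &args, sizeof(args)));
}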
424 nouveau_svm_fault_priority(u8 fault)
426 switch (fault) {
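A plausible body for the priority function, using the FAULT_ACCESS_* names that appear later in this listing (the FAULT_ACCESS_ATOMIC case and the exact ranking are assumptions); the point is a total order on access types for the sort at line 756:

static int
nouveau_svm_fault_priority(u8 fault)
{
        /* Rank access types by how demanding they are, so the fault
         * that dominates an address decides the mapping permissions. */
        switch (fault) {
        case FAULT_ACCESS_PREFETCH:
                return 0;
        case FAULT_ACCESS_READ:
                return 1;
        case FAULT_ACCESS_WRITE:
                return 2;
        case FAULT_ACCESS_ATOMIC:
                return 3;
        default:
                WARN_ON_ONCE(1);
                return -1;
        }
}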
472 struct nouveau_svm_fault *fault;
480 if (!buffer->fault[buffer->fault_nr]) {
481 fault = kmalloc(sizeof(*fault), GFP_KERNEL);
482 if (WARN_ON(!fault)) {
486 buffer->fault[buffer->fault_nr] = fault;
489 fault = buffer->fault[buffer->fault_nr++];
490 fault->inst = inst;
491 fault->addr = (u64)addrhi << 32 | addrlo;
492 fault->time = (u64)timehi << 32 | timelo;
493 fault->engine = engine;
494 fault->gpc = gpc;
495 fault->hub = hub;
496 fault->access = (info & 0x000f0000) >> 16;
497 fault->client = client;
498 fault->fault = (info & 0x0000001f);
500 SVM_DBG(svm, "fault %016llx %016llx %02x",
501 fault->inst, fault->addr, fault->access);
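A standalone illustration of the bitfield layout implied by the cache-fill lines above: the access type sits in bits 19:16 of the info word, the raw fault reason in bits 4:0, and 64-bit address/timestamp values are assembled from hi/lo halves (the example values here are arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t info   = 0x00030007;   /* arbitrary raw entry word */
        uint32_t addrhi = 0x00000012;
        uint32_t addrlo = 0x34560000;

        uint8_t  access = (info & 0x000f0000) >> 16;    /* -> 0x03 */
        uint8_t  reason = (info & 0x0000001f);          /* -> 0x07 */
        uint64_t addr   = (uint64_t)addrhi << 32 | addrlo;

        printf("fault %016llx access %02x reason %02x\n",
               (unsigned long long)addr, (unsigned)access, (unsigned)reason);
        return 0;
}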
659 /* Have HMM fault pages within the fault window to the GPU. */
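A sketch of that HMM step using the standard hmm_range_fault() retry pattern; the helper name and signature are hypothetical, and it assumes an mmu_interval_notifier is already registered for the mirrored address space. The real driver additionally revalidates notifier_seq under its page-table lock before committing the PFNs to the GPU:

#include <linux/hmm.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>

static int
svm_fault_window(struct mmu_interval_notifier *notifier,
                 unsigned long start, unsigned long end,
                 unsigned long *hmm_pfns, bool need_write)
{
        struct hmm_range range = {
                .notifier = notifier,
                .start = start,
                .end = end,
                .hmm_pfns = hmm_pfns,
                .default_flags = HMM_PFN_REQ_FAULT |
                                 (need_write ? HMM_PFN_REQ_WRITE : 0),
        };
        struct mm_struct *mm = notifier->mm;
        int ret;

        do {
                range.notifier_seq = mmu_interval_read_begin(notifier);
                mmap_read_lock(mm);
                ret = hmm_range_fault(&range);  /* populates hmm_pfns[] */
                mmap_read_unlock(mm);
                if (ret && ret != -EBUSY)
                        return ret;
                /* Retry if the range was invalidated while we walked it. */
        } while (ret == -EBUSY ||
                 mmu_interval_read_retry(notifier, range.notifier_seq));

        return 0;
}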
731 /* Parse available fault buffer entries into a cache, and update
734 SVM_DBG(svm, "fault handler");
750 SVM_DBG(svm, "%d fault(s) pending", buffer->fault_nr);
756 sort(buffer->fault, buffer->fault_nr, sizeof(*buffer->fault),
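A comparator consistent with the grouping logic below: order by channel instance, then address, then access priority, so duplicate addresses from one SVMM collapse onto the fault whose access dominates (the tie-break direction is an assumption):

/* Elements are pointers, matching sizeof(*buffer->fault) above. */
static int
nouveau_svm_fault_cmp(const void *a, const void *b)
{
        const struct nouveau_svm_fault *fa =
                *(struct nouveau_svm_fault * const *)a;
        const struct nouveau_svm_fault *fb =
                *(struct nouveau_svm_fault * const *)b;
        s64 ret;

        if ((ret = (s64)(fa->inst - fb->inst)))
                return ret < 0 ? -1 : 1;
        if ((ret = (s64)(fa->addr - fb->addr)))
                return ret < 0 ? -1 : 1;
        /* Most demanding access first (assumed direction), so the first
         * fault seen for an address decides the mapping permissions. */
        return nouveau_svm_fault_priority(fb->access) -
               nouveau_svm_fault_priority(fa->access);
}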
762 if (!svmm || buffer->fault[fi]->inst != inst) {
764 nouveau_ivmm_find(svm, buffer->fault[fi]->inst);
766 inst = buffer->fault[fi]->inst;
769 buffer->fault[fi]->svmm = svmm;
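A plausible expansion of the grouping lines above: because the sort placed faults from the same channel together, the handler can cache the last instance-to-svmm lookup while walking the list (the nouveau_ivmm field names are assumptions):

if (!svmm || buffer->fault[fi]->inst != inst) {
        struct nouveau_ivmm *ivmm =
                nouveau_ivmm_find(svm, buffer->fault[fi]->inst);
        svmm = ivmm ? ivmm->svmm : NULL;  /* NULL: not an SVM-managed VMM */
        inst = buffer->fault[fi]->inst;
}
buffer->fault[fi]->svmm = svmm;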
785 if (!(svmm = buffer->fault[fi]->svmm)) {
786 nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
789 SVMM_DBG(svmm, "addr %016llx", buffer->fault[fi]->addr);
794 start = buffer->fault[fi]->addr;
801 * fault window, determining required pages and access
808 * Determine required permissions based on GPU fault
811 switch (buffer->fault[fi]->access) {
828 nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
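The access switch at line 811 can be factored into a hypothetical helper (the name and return convention are mine; HMM_PFN_REQ_* are the real HMM request flags) that maps the GPU access type onto HMM request flags, with unsupported types answered by the cancel at line 828:

#include <linux/hmm.h>

static int
nouveau_svm_fault_flags(u8 access, unsigned long *hmm_flags)
{
        switch (access) {
        case FAULT_ACCESS_READ:
                *hmm_flags = HMM_PFN_REQ_FAULT;
                return 0;
        case FAULT_ACCESS_WRITE:
                *hmm_flags = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE;
                return 0;
        case FAULT_ACCESS_PREFETCH:
                /* Prefetch never forces residency on its own. */
                *hmm_flags = 0;
                return 0;
        default:
                return -EPERM;  /* caller cancels the fault */
        }
}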
852 * fault addresses have sufficient access permission.
854 if (buffer->fault[fn]->svmm != svmm ||
855 buffer->fault[fn]->addr >= limit ||
856 (buffer->fault[fn]->access == FAULT_ACCESS_READ &&
858 (buffer->fault[fn]->access != FAULT_ACCESS_READ &&
859 buffer->fault[fn]->access != FAULT_ACCESS_PREFETCH &&
861 (buffer->fault[fn]->access != FAULT_ACCESS_READ &&
862 buffer->fault[fn]->access != FAULT_ACCESS_WRITE &&
863 buffer->fault[fn]->access != FAULT_ACCESS_PREFETCH &&
871 struct nouveau_svm_fault *fault =
872 buffer->fault[fi++];
874 nouveau_svm_fault_cancel_fault(svm, fault);
880 /* Issue fault replay to the GPU. */
975 if (buffer->fault) {
976 for (i = 0; i < buffer->entries && buffer->fault[i]; i++) /* bounds check before dereference */
977 kfree(buffer->fault[i]);
978 kvfree(buffer->fault);
1014 buffer->fault = kvcalloc(buffer->entries, sizeof(*buffer->fault), GFP_KERNEL);
1015 if (!buffer->fault)
1075 SVM_DBG(svm, "No supported fault buffer class");