Lines Matching refs:fault

63 			u8  fault;
65 } **fault;
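
The two declarations above come from the per-buffer cache: each parsed entry is kept in a heap-allocated record, and buffer->fault is an array of pointers to those records. A hedged reconstruction of that record, using only the fields the rest of this listing assigns or reads (field types other than the u64 address/time pairs and the u8 fault byte are assumptions):

	struct nouveau_svm_fault {
		u64 inst;	/* faulting channel instance */
		u64 addr;	/* faulting GPU virtual address */
		u64 time;	/* timestamp from the fault buffer entry */
		u32 engine;	/* assumed width */
		u8  gpc;
		u8  hub;
		u8  access;	/* access type: 0 = READ, 3 = PREFETCH below */
		u8  client;
		u8  fault;	/* fault type, low 5 bits of the info word */
		struct nouveau_svmm *svmm;	/* owning SVM mm, filled in later */
	};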
155 * page fault) and maybe some other commands.
377 /* Issue fault replay for GPU to retry accesses that faulted previously. */
388 /* Cancel a replayable fault that could not be handled.
390 * Cancelling the fault will trigger recovery to reset the engine
410 struct nouveau_svm_fault *fault)
412 nouveau_svm_fault_cancel(svm, fault->inst,
413 fault->hub,
414 fault->gpc,
415 fault->client);
450 struct nouveau_svm_fault *fault;
458 if (!buffer->fault[buffer->fault_nr]) {
459 fault = kmalloc(sizeof(*fault), GFP_KERNEL);
460 if (WARN_ON(!fault)) {
464 buffer->fault[buffer->fault_nr] = fault;
467 fault = buffer->fault[buffer->fault_nr++];
468 fault->inst = inst;
469 fault->addr = (u64)addrhi << 32 | addrlo;
470 fault->time = (u64)timehi << 32 | timelo;
471 fault->engine = engine;
472 fault->gpc = gpc;
473 fault->hub = hub;
474 fault->access = (info & 0x000f0000) >> 16;
475 fault->client = client;
476 fault->fault = (info & 0x0000001f);
478 SVM_DBG(svm, "fault %016llx %016llx %02x",
479 fault->inst, fault->addr, fault->access);
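
Lines 468-476 rebuild each 64-bit value from the two 32-bit halves read out of the hardware fault buffer and extract the access and fault-type fields from the info word. A minimal worked example of the same decode (the sample values in the comment are hypothetical):

	#include <linux/types.h>

	static void example_decode(u32 addrhi, u32 addrlo, u32 info)
	{
		u64 addr   = (u64)addrhi << 32 | addrlo;
		u8  access = (info & 0x000f0000) >> 16;
		u8  type   =  info & 0x0000001f;

		/* e.g. addrhi = 0x1, addrlo = 0x2000, info = 0x00030004 gives
		 * addr = 0x0000000100002000, access = 3 (PREFETCH), type = 0x04. */
		(void)addr; (void)access; (void)type;
	}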
570 /* Have HMM fault pages within the fault window to the GPU. */
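
The comment at line 570 covers the core of the handler: the CPU pages backing the fault window are faulted in through HMM before the GPU mappings are rebuilt. The sketch below is the generic retry pattern documented in Documentation/mm/hmm.rst, not the driver's exact code; the notifier, pfns array, and flag choice are assumptions supplied by the caller.

	#include <linux/mm.h>
	#include <linux/hmm.h>
	#include <linux/mmu_notifier.h>

	static int example_populate(struct mmu_interval_notifier *notifier,
				    struct mm_struct *mm,
				    unsigned long start, unsigned long end,
				    unsigned long *pfns)
	{
		struct hmm_range range = {
			.notifier = notifier,
			.start = start,
			.end = end,
			.hmm_pfns = pfns,
			.default_flags = HMM_PFN_REQ_FAULT,
		};
		int ret;

	again:
		range.notifier_seq = mmu_interval_read_begin(notifier);
		mmap_read_lock(mm);
		ret = hmm_range_fault(&range);
		mmap_read_unlock(mm);
		if (ret) {
			if (ret == -EBUSY)
				goto again;
			return ret;
		}

		/* A real driver would now take its page-table lock, check
		 * mmu_interval_read_retry(), and program the GPU mappings
		 * from the pfns array. */
		return 0;
	}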
634 /* Parse available fault buffer entries into a cache, and update
637 SVM_DBG(svm, "fault handler");
653 SVM_DBG(svm, "%d fault(s) pending", buffer->fault_nr);
659 sort(buffer->fault, buffer->fault_nr, sizeof(*buffer->fault),
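
The comparator argument to sort() is truncated in the line above. Since the loop that follows groups consecutive entries by channel instance and walks addresses upward, a plausible comparator looks like the sketch below (the driver's real one may also order by access type). Note the double indirection: buffer->fault holds pointers, so sort() hands the comparator pointers to pointers.

	#include <linux/sort.h>

	static int example_fault_cmp(const void *a, const void *b)
	{
		const struct nouveau_svm_fault *fa = *(struct nouveau_svm_fault * const *)a;
		const struct nouveau_svm_fault *fb = *(struct nouveau_svm_fault * const *)b;

		if (fa->inst != fb->inst)
			return fa->inst < fb->inst ? -1 : 1;
		if (fa->addr != fb->addr)
			return fa->addr < fb->addr ? -1 : 1;
		return 0;
	}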
665 if (!svmm || buffer->fault[fi]->inst != inst) {
667 nouveau_ivmm_find(svm, buffer->fault[fi]->inst);
669 inst = buffer->fault[fi]->inst;
672 buffer->fault[fi]->svmm = svmm;
688 if (!(svmm = buffer->fault[fi]->svmm)) {
689 nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
692 SVMM_DBG(svmm, "addr %016llx", buffer->fault[fi]->addr);
697 start = buffer->fault[fi]->addr;
704 * fault window, determining required pages and access
711 * Determine required permissions based on GPU fault
715 switch (buffer->fault[fi]->access) {
729 nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
753 * fault addresses have sufficient access permission.
755 if (buffer->fault[fn]->svmm != svmm ||
756 buffer->fault[fn]->addr >= limit ||
757 (buffer->fault[fi]->access == 0 /* READ. */ &&
759 (buffer->fault[fi]->access != 0 /* READ. */ &&
760 buffer->fault[fi]->access != 3 /* PREFETCH. */ &&
768 struct nouveau_svm_fault *fault =
769 buffer->fault[fi++];
771 nouveau_svm_fault_cancel_fault(svm, fault);
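
Lines 711-760 decide what each fault needs and how far the window can stretch: access type 0 is a read, 3 is a prefetch, and the remaining types are treated as write-class accesses; a neighbouring fault is folded into the same window only if it belongs to the same svmm, lies below the window limit, and the pages already gathered grant it enough permission, otherwise it is cancelled here. A hedged sketch of that permission mapping (the flag names are hypothetical stand-ins, not the values the driver actually passes down):

	#include <linux/types.h>

	#define EXAMPLE_NEED_VALID  0x1	/* hypothetical: page must be present */
	#define EXAMPLE_NEED_WRITE  0x2	/* hypothetical: page must be writable */

	static unsigned int example_access_flags(u8 access)
	{
		switch (access) {
		case 0:	/* READ: just needs a valid mapping */
			return EXAMPLE_NEED_VALID;
		case 3:	/* PREFETCH: best effort, nothing mandatory */
			return 0;
		default: /* write/atomic-style accesses */
			return EXAMPLE_NEED_VALID | EXAMPLE_NEED_WRITE;
		}
	}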
777 /* Issue fault replay to the GPU. */
857 if (buffer->fault) {
858 		for (i = 0; i < buffer->entries && buffer->fault[i]; i++)
859 kfree(buffer->fault[i]);
860 kvfree(buffer->fault);
898 buffer->fault = kvzalloc(sizeof(*buffer->fault) * buffer->entries, GFP_KERNEL);
899 if (!buffer->fault)
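
Line 898 sizes the cache with an open-coded multiply. A hedged alternative, not what the listing shows, is kvcalloc(), which returns NULL instead of silently wrapping if entries * sizeof(pointer) were ever to overflow:

	buffer->fault = kvcalloc(buffer->entries, sizeof(*buffer->fault), GFP_KERNEL);
	if (!buffer->fault)
		return -ENOMEM;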
959 SVM_DBG(svm, "No supported fault buffer class");