Lines matching refs:buffer (cross-reference listing; the number opening each line below is its source line in drivers/gpu/drm/nouveau/nouveau_svm.c)
67 } buffer[1];
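For orientation: line 67 closes the per-buffer state embedded in struct nouveau_svm. A sketch of that struct, reconstructed only from the fields this listing references; member order, exact types, and any members not referenced here are assumptions:

	struct nouveau_svm_fault_buffer {
		int id;                           /* slot in svm->buffer[] (lines 622, 878) */
		struct nvif_object object;        /* HW fault buffer object (lines 437, 866) */
		u32 entries;                      /* ring size in records (lines 649, 888) */
		u32 getaddr, putaddr;             /* GET/PUT register offsets (lines 639-640, 889-890) */
		u32 get, put;                     /* cached ring indices (lines 638-652) */
		struct nvif_notify notify;        /* fault notifier (lines 620, 892-894) */
		struct nouveau_svm_fault **fault; /* parsed-fault cache (lines 458-467, 898) */
		int fault_nr;                     /* faults currently cached (lines 644, 653) */
	};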
435 struct nouveau_svm_fault_buffer *buffer, u32 offset)
437 struct nvif_object *memory = &buffer->object;
458 if (!buffer->fault[buffer->fault_nr]) {
464 buffer->fault[buffer->fault_nr] = fault;
467 fault = buffer->fault[buffer->fault_nr++];
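Lines 458-467 show the cache's lazy allocation: a slot is allocated only the first time it is needed, and the pointer stays in the array so every later drain reuses it, making steady-state fault parsing allocation-free. A minimal userspace sketch of that pattern (names hypothetical):

	#include <stdlib.h>

	struct fault { unsigned long long addr; };

	/* Return slot *nr, allocating it on first use; the pointer persists
	 * in cache[] so later passes reuse it (cf. lines 458-467). */
	struct fault *
	fault_cache_get(struct fault **cache, int *nr)
	{
		if (!cache[*nr]) {
			cache[*nr] = malloc(sizeof(*cache[*nr]));
			if (!cache[*nr])
				return NULL;
		}
		return cache[(*nr)++];
	}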
619 struct nouveau_svm_fault_buffer *buffer =
620 container_of(notify, typeof(*buffer), notify);
622 container_of(buffer, typeof(*svm), buffer[buffer->id]);
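Lines 619-622 apply container_of twice: once to step from the nvif_notify pointer out to its enclosing fault buffer, and again from the buffer out to the enclosing nouveau_svm, using buffer->id to name which array slot this buffer occupies. A self-contained sketch of that double hop (all type names hypothetical; the variable array index inside offsetof is a GCC/Clang feature the kernel itself relies on here):

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct notify { int token; };
	struct buffer { int id; struct notify notify; };
	struct svm { const char *name; struct buffer buffer[2]; };

	int main(void)
	{
		struct svm svm = { .name = "svm0" };
		svm.buffer[1].id = 1;

		/* The handler only receives the notify pointer... */
		struct notify *n = &svm.buffer[1].notify;

		/* ...step out to the buffer, then use buffer->id to step
		 * out past the right array slot to the enclosing svm. */
		struct buffer *buffer = container_of(n, struct buffer, notify);
		struct svm *s = container_of(buffer, struct svm,
					     buffer[buffer->id]);

		printf("%s buffer %d\n", s->name, buffer->id); /* svm0 buffer 1 */
		return 0;
	}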
634 /* Parse available fault buffer entries into a cache, and update
638 if (buffer->get == buffer->put) {
639 buffer->put = nvif_rd32(device, buffer->putaddr);
640 buffer->get = nvif_rd32(device, buffer->getaddr);
641 if (buffer->get == buffer->put)
644 buffer->fault_nr = 0;
646 SVM_DBG(svm, "get %08x put %08x", buffer->get, buffer->put);
647 while (buffer->get != buffer->put) {
648 nouveau_svm_fault_cache(svm, buffer, buffer->get * 0x20);
649 if (++buffer->get == buffer->entries)
650 buffer->get = 0;
652 nvif_wr32(device, buffer->getaddr, buffer->get);
653 SVM_DBG(svm, "%d fault(s) pending", buffer->fault_nr);
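Lines 638-652 are a classic producer/consumer ring drain: the hardware advances PUT as it writes fault records, software chases it with GET (wrapping at 'entries', each record 0x20 bytes wide), then writes GET back so the hardware knows those slots are free again. A runnable sketch with plain variables standing in for the nvif_rd32/nvif_wr32 register accesses:

	#include <stdio.h>

	#define ENTRIES 8

	static unsigned int hw_get, hw_put;	/* stand-ins for the registers */

	static void drain(unsigned int entries)
	{
		unsigned int get = hw_get, put = hw_put;

		if (get == put)
			return;				/* nothing pending */

		while (get != put) {
			printf("parse entry %u (offset 0x%x)\n", get, get * 0x20);
			if (++get == entries)
				get = 0;		/* ring wrap */
		}

		hw_get = get;				/* publish consumed slots */
	}

	int main(void)
	{
		hw_put = 3;				/* pretend HW wrote 3 faults */
		drain(ENTRIES);
		return 0;
	}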
659 sort(buffer->fault, buffer->fault_nr, sizeof(*buffer->fault),
664 for (fi = 0, svmm = NULL; fi < buffer->fault_nr; fi++) {
665 if (!svmm || buffer->fault[fi]->inst != inst) {
667 nouveau_ivmm_find(svm, buffer->fault[fi]->inst);
669 inst = buffer->fault[fi]->inst;
672 buffer->fault[fi]->svmm = svmm;
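The sort at line 659 is what makes the two loops that follow cheap: faults from the same instance become adjacent, so the lookup loop at lines 664-672 calls nouveau_ivmm_find once per group rather than once per fault, and faults on nearby addresses line up for the coalescing pass below. A hypothetical comparator in that spirit, ordering by instance, then address, then access type (an assumption about the real comparator, based on how the loops use the ordering):

	#include <stdlib.h>

	struct fault { unsigned long long inst, addr; int access; };

	static int fault_cmp(const void *a, const void *b)
	{
		const struct fault *fa = *(const struct fault *const *)a;
		const struct fault *fb = *(const struct fault *const *)b;

		if (fa->inst != fb->inst)
			return fa->inst < fb->inst ? -1 : 1;
		if (fa->addr != fb->addr)
			return fa->addr < fb->addr ? -1 : 1;
		return fa->access - fb->access;
	}

	/* usage: qsort(fault, fault_nr, sizeof(*fault), fault_cmp);
	 * where fault is an array of struct fault pointers, matching the
	 * pointer array the kernel sorts at line 659. */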
683 for (fi = 0; fn = fi + 1, fi < buffer->fault_nr; fi = fn) {
688 if (!(svmm = buffer->fault[fi]->svmm)) {
689 nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
692 SVMM_DBG(svmm, "addr %016llx", buffer->fault[fi]->addr);
697 start = buffer->fault[fi]->addr;
715 switch (buffer->fault[fi]->access) {
729 nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
745 for (fn = fi; ++fn < buffer->fault_nr; ) {
755 if (buffer->fault[fn]->svmm != svmm ||
756 buffer->fault[fn]->addr >= limit ||
757 (buffer->fault[fi]->access == 0 /* READ. */ &&
759 (buffer->fault[fi]->access != 0 /* READ. */ &&
760 buffer->fault[fi]->access != 3 /* PREFETCH. */ &&
769 buffer->fault[fi++];
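Lines 683-769 consume the sorted faults. The outer loop header at line 683 uses a comma expression to re-prime fn before every test; each pass handles fault fi (cancelling it outright if it has no svmm, line 689, or an unsupported access type, lines 715-729), computes a [start, limit) window around its address, and the inner loop at lines 745-760 advances fn past every later fault that the same pass will satisfy. A much simplified, runnable sketch of that windowed-coalescing shape (window size and all names hypothetical):

	#include <stdio.h>

	struct fault { unsigned long long addr; };

	#define WINDOW 0x200000ULL	/* hypothetical per-pass range */

	static void process(struct fault *fault, int nr)
	{
		int fi, fn;

		/* fn = fi + 1 in the condition mirrors line 683: fn is
		 * re-primed before each test so 'fi = fn' always advances. */
		for (fi = 0; fn = fi + 1, fi < nr; fi = fn) {
			unsigned long long start = fault[fi].addr;
			unsigned long long limit = start + WINDOW;

			/* swallow every later fault inside the same window */
			for (fn = fi; ++fn < nr; ) {
				if (fault[fn].addr >= limit)
					break;
			}
			printf("handle [%llx, %llx): %d fault(s)\n",
			       start, limit, fn - fi);
		}
	}

	int main(void)
	{
		struct fault f[] = { { 0x1000 }, { 0x2000 }, { 0x400000 } };
		process(f, 3);
		return 0;
	}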
836 struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
837 nvif_notify_put(&buffer->notify);
843 struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
845 buffer->get = nvif_rd32(device, buffer->getaddr);
846 buffer->put = nvif_rd32(device, buffer->putaddr);
847 SVM_DBG(svm, "get %08x put %08x (init)", buffer->get, buffer->put);
848 return nvif_notify_get(&buffer->notify);
854 struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
857 if (buffer->fault) {
858 for (i = 0; i < buffer->entries && buffer->fault[i]; i++)
859 kfree(buffer->fault[i]);
860 kvfree(buffer->fault);
865 nvif_notify_dtor(&buffer->notify);
866 nvif_object_dtor(&buffer->object);
872 struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
878 buffer->id = id;
881 sizeof(args), &buffer->object);
883 SVM_ERR(svm, "Fault buffer allocation failed: %d", ret);
887 nvif_object_map(&buffer->object, NULL, 0);
888 buffer->entries = args.entries;
889 buffer->getaddr = args.get;
890 buffer->putaddr = args.put;
892 ret = nvif_notify_ctor(&buffer->object, "svmFault", nouveau_svm_fault,
894 &buffer->notify);
898 buffer->fault = kvzalloc(sizeof(*buffer->fault) * buffer->entries, GFP_KERNEL);
899 if (!buffer->fault)
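A small style note on the allocation at line 898: the open-coded size multiplication can overflow in principle; the overflow-checked kernel idiom would be kvcalloc (a suggestion, not what the file actually uses):

	buffer->fault = kvcalloc(buffer->entries, sizeof(*buffer->fault),
				 GFP_KERNEL);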
959 SVM_DBG(svm, "No supported fault buffer class");