Lines Matching refs:buffer

70 	} buffer[1];
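
The `} buffer[1];` above closes the per-buffer bookkeeping embedded in struct nouveau_svm. A sketch of that struct, reconstructed from the fields the matches below touch; the member names come from the matches themselves, but types and ordering are assumptions, not the kernel's exact definition:

    /* Assumed layout (kernel context; u32 is the kernel's 32-bit type). */
    struct nouveau_svm_fault_buffer {
            int id;                            /* slot in svm->buffer[] */
            struct nvif_object object;         /* mapped HW fault buffer */
            u32 entries;                       /* ring capacity */
            u32 getaddr, putaddr;              /* GET/PUT register offsets */
            u32 get, put;                      /* cached ring indices */
            struct nvif_event notify;          /* "fault pending" event */
            struct work_struct work;           /* runs nouveau_svm_fault() */
            struct nouveau_svm_fault **fault;  /* lazily filled fault cache */
            int fault_nr;                      /* faults parsed this run */
    };
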
457 struct nouveau_svm_fault_buffer *buffer, u32 offset)
459 struct nvif_object *memory = &buffer->object;
480 if (!buffer->fault[buffer->fault_nr]) {
486 buffer->fault[buffer->fault_nr] = fault;
489 fault = buffer->fault[buffer->fault_nr++];
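
Lines 480-489 are the lazy-fill pattern in the fault-cache routine: a slot is allocated the first time that depth of the cache is reached, then reused on every later run. A minimal userspace sketch of the same shape (struct fault and malloc() stand in for the kernel's types and kmalloc()):

    #include <stdlib.h>

    struct fault { unsigned long long inst, addr; };

    /* Return the next cache slot, allocating it on first use; slots
     * persist across runs, so the steady state allocates nothing. */
    static struct fault *cache_next(struct fault **slots, int *nr)
    {
            if (!slots[*nr]) {                 /* first use of this slot */
                    struct fault *f = malloc(sizeof(*f));
                    if (!f)
                            return NULL;       /* kernel cancels the fault */
                    slots[*nr] = f;
            }
            return slots[(*nr)++];             /* reuse and advance count */
    }

    int main(void)
    {
            struct fault *slots[4] = { 0 };
            int nr = 0;
            struct fault *f = cache_next(slots, &nr);  /* allocates */
            nr = 0;                                    /* next run */
            return cache_next(slots, &nr) != f;        /* reuses slot 0 */
    }
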
718 struct nouveau_svm_fault_buffer *buffer = container_of(work, typeof(*buffer), work);
719 struct nouveau_svm *svm = container_of(buffer, typeof(*svm), buffer[buffer->id]);
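
Lines 718-719 recover both containing structures from the bare work pointer. The second container_of() names buffer[buffer->id] as the member, so the computed offset folds in this buffer's own slot within the array. A portable userspace demo of the trick (local stand-in types; the kernel's runtime index inside offsetof() relies on a GCC extension, so the demo rewinds to buffer[0] instead):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work { int pending; };
    struct buf  { int id; struct work work; };
    struct svm  { const char *name; struct buf buffer[2]; };

    static void handler(struct work *w)
    {
            struct buf *b = container_of(w, struct buf, work);
            /* Kernel form: container_of(b, typeof(*s), buffer[b->id]).
             * Equivalent: rewind b to buffer[0], then use a fixed member. */
            struct svm *s = container_of(b - b->id, struct svm, buffer[0]);
            printf("%s: buffer %d\n", s->name, b->id);
    }

    int main(void)
    {
            struct svm s = { "svm", { { .id = 0 }, { .id = 1 } } };
            handler(&s.buffer[1].work);     /* prints "svm: buffer 1" */
            return 0;
    }
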
731 /* Parse available fault buffer entries into a cache, and update
735 if (buffer->get == buffer->put) {
736 buffer->put = nvif_rd32(device, buffer->putaddr);
737 buffer->get = nvif_rd32(device, buffer->getaddr);
738 if (buffer->get == buffer->put)
741 buffer->fault_nr = 0;
743 SVM_DBG(svm, "get %08x put %08x", buffer->get, buffer->put);
744 while (buffer->get != buffer->put) {
745 nouveau_svm_fault_cache(svm, buffer, buffer->get * 0x20);
746 if (++buffer->get == buffer->entries)
747 buffer->get = 0;
749 nvif_wr32(device, buffer->getaddr, buffer->get);
750 SVM_DBG(svm, "%d fault(s) pending", buffer->fault_nr);
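
Lines 735-750 drain the hardware ring: when the cached indices claim the ring is empty, GET and PUT are re-read from the mapped registers; each pending 0x20-byte entry is parsed into the cache, GET wraps at the ring capacity, and the final GET is written back so hardware can reuse the slots. A self-contained simulation of that control flow (reg[] stands in for the two mapped registers):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t reg[2];                        /* [0]=GET, [1]=PUT */
    static uint32_t rd32(int r)              { return reg[r]; }
    static void     wr32(int r, uint32_t v)  { reg[r] = v; }

    static void drain(uint32_t entries, uint32_t *get, uint32_t *put)
    {
            if (*get == *put) {            /* cache says empty: refresh */
                    *put = rd32(1);
                    *get = rd32(0);
                    if (*get == *put)      /* ring really is empty */
                            return;
            }
            while (*get != *put) {
                    printf("parse entry at %#x\n", *get * 0x20);
                    if (++*get == entries) /* wrap at ring capacity */
                            *get = 0;
            }
            wr32(0, *get);                 /* hand slots back to HW */
    }

    int main(void)
    {
            uint32_t get = 0, put = 0;
            reg[1] = 3;                    /* HW produced 3 entries */
            drain(4, &get, &put);
            return 0;
    }
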
756 sort(buffer->fault, buffer->fault_nr, sizeof(*buffer->fault),
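
Line 756 sorts the cached fault pointers before processing. The comparator, nouveau_svm_fault_cmp, is not among the matches; a hypothetical shape, assuming ordering by instance pointer first and faulting address second, which is what the memoized lookup and batching loops below depend on:

    struct fault { unsigned long long inst, addr; };

    /* Hypothetical stand-in for nouveau_svm_fault_cmp: each element is a
     * POINTER to a fault, hence the double indirection. */
    static int fault_cmp(const void *a, const void *b)
    {
            const struct fault *fa = *(const struct fault * const *)a;
            const struct fault *fb = *(const struct fault * const *)b;

            if (fa->inst != fb->inst)
                    return fa->inst < fb->inst ? -1 : 1;
            if (fa->addr != fb->addr)
                    return fa->addr < fb->addr ? -1 : 1;
            return 0;
    }

The same signature fits both the kernel's sort() and userspace qsort().
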
761 for (fi = 0, svmm = NULL; fi < buffer->fault_nr; fi++) {
762 if (!svmm || buffer->fault[fi]->inst != inst) {
764 nouveau_ivmm_find(svm, buffer->fault[fi]->inst);
766 inst = buffer->fault[fi]->inst;
769 buffer->fault[fi]->svmm = svmm;
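
Lines 761-769 walk the sorted array and attach an owning SVMM to each fault, memoizing the previous translation so each unique instance pointer is resolved once per run. The same loop in isolation (lookup() is a hypothetical stand-in for the nouveau_ivmm_find() path):

    struct svmm;
    struct fault { unsigned long long inst; struct svmm *svmm; };

    /* Sorting grouped equal inst values together, so lookup() runs once
     * per unique instance pointer. */
    static void attach_owners(struct fault **f, int nr,
                              struct svmm *(*lookup)(unsigned long long))
    {
            struct svmm *svmm = NULL;
            unsigned long long inst = 0;

            for (int fi = 0; fi < nr; fi++) {
                    if (!svmm || f[fi]->inst != inst) {
                            svmm = lookup(f[fi]->inst);
                            inst = f[fi]->inst;
                    }
                    f[fi]->svmm = svmm;   /* NULL owner: cancelled later */
            }
    }
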
780 for (fi = 0; fn = fi + 1, fi < buffer->fault_nr; fi = fn) {
785 if (!(svmm = buffer->fault[fi]->svmm)) {
786 nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
789 SVMM_DBG(svmm, "addr %016llx", buffer->fault[fi]->addr);
794 start = buffer->fault[fi]->addr;
811 switch (buffer->fault[fi]->access) {
828 nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
844 for (fn = fi; ++fn < buffer->fault_nr; ) {
854 if (buffer->fault[fn]->svmm != svmm ||
855 buffer->fault[fn]->addr >= limit ||
856 (buffer->fault[fi]->access == FAULT_ACCESS_READ &&
858 (buffer->fault[fi]->access != FAULT_ACCESS_READ &&
859 buffer->fault[fi]->access != FAULT_ACCESS_PREFETCH &&
861 (buffer->fault[fi]->access != FAULT_ACCESS_READ &&
862 buffer->fault[fi]->access != FAULT_ACCESS_WRITE &&
863 buffer->fault[fi]->access != FAULT_ACCESS_PREFETCH &&
872 buffer->fault[fi++];
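
Lines 780-872 are the processing loop: each outer iteration handles one fault and then advances fn past every following fault that the freshly handled range already covers (same SVMM, address below the computed limit, access type compatible per the FAULT_ACCESS_* checks at lines 854-863); ownerless faults are cancelled outright (line 786), and line 872 sits inside the loop that cancels a whole batch when handling fails. A compilable skeleton of that control flow; handle_range() and access_covered() are hypothetical stand-ins for logic the matches only show in part:

    struct svmm { int dummy; };
    struct fault {
            struct svmm *svmm;
            unsigned long long addr;
            int access;
    };

    static void cancel(struct fault *f) { (void)f; }

    /* Stand-in: map one page and report the end of the handled range. */
    static int handle_range(struct svmm *s, struct fault *f,
                            unsigned long long *limit)
    {
            (void)s;
            *limit = f->addr + 0x1000;
            return 0;                       /* 0 = handled */
    }

    /* Stand-in for the FAULT_ACCESS_* compatibility checks. */
    static int access_covered(const struct fault *a, const struct fault *b)
    {
            return a->access == b->access;
    }

    static void process(struct fault **fault, int nr)
    {
            unsigned long long limit;
            struct svmm *svmm;
            int fi, fn, ret;

            for (fi = 0; fn = fi + 1, fi < nr; fi = fn) {
                    if (!(svmm = fault[fi]->svmm)) {
                            cancel(fault[fi]);      /* no owner (line 786) */
                            continue;
                    }
                    ret = handle_range(svmm, fault[fi], &limit);
                    for (fn = fi; ++fn < nr; ) {    /* lines 844-863 */
                            if (fault[fn]->svmm != svmm ||
                                fault[fn]->addr >= limit ||
                                !access_covered(fault[fi], fault[fn]))
                                    break;
                    }
                    if (ret)                        /* failed: drop batch */
                            while (fi < fn)
                                    cancel(fault[fi++]);  /* line 872 */
            }
    }
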
888 struct nouveau_svm_fault_buffer *buffer = container_of(event, typeof(*buffer), notify);
890 schedule_work(&buffer->work);
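
Lines 888-890 are the top half of the path: the event callback does no parsing itself, it only schedules the work item, and nouveau_svm_fault() then runs in process context where it may sleep. Reconstructed shape (kernel context; the return value is an assumption, NVIF_EVENT_KEEP meaning "stay subscribed"):

    static int svm_event_sketch(struct nvif_event *event, void *argv, u32 argc)
    {
            struct nouveau_svm_fault_buffer *buffer =
                    container_of(event, typeof(*buffer), notify);

            schedule_work(&buffer->work);   /* defer to nouveau_svm_fault() */
            return NVIF_EVENT_KEEP;         /* assumed: keep the event armed */
    }
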
945 struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
947 nvif_event_block(&buffer->notify);
948 flush_work(&buffer->work);
954 struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
957 buffer->get = nvif_rd32(device, buffer->getaddr);
958 buffer->put = nvif_rd32(device, buffer->putaddr);
959 SVM_DBG(svm, "get %08x put %08x (init)", buffer->get, buffer->put);
961 return nvif_event_allow(&buffer->notify);
967 struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
970 if (!nvif_object_constructed(&buffer->object))
975 if (buffer->fault) {
976 for (i = 0; i < buffer->entries && buffer->fault[i]; i++)
977 kfree(buffer->fault[i]);
978 kvfree(buffer->fault);
981 nvif_event_dtor(&buffer->notify);
982 nvif_object_dtor(&buffer->object);
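
Lines 967-982 tear a buffer down in reverse order of construction: quiesce first (block the event, flush the work), then free the lazily allocated fault structs, then destroy the event and the object. In the free loop the bound must be tested before the slot is dereferenced, otherwise a fully populated cache reads one slot past the end of the array. The loop in userspace form:

    #include <stdlib.h>

    struct fault;

    /* Free each lazily allocated entry, stopping at the first never-used
     * (NULL) slot; bounds check first, then dereference. */
    static void free_fault_cache(struct fault **slots, unsigned int entries)
    {
            if (!slots)
                    return;
            for (unsigned int i = 0; i < entries && slots[i]; i++)
                    free(slots[i]);
            free(slots);            /* kvfree() in the kernel */
    }
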
988 struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
994 buffer->id = id;
997 sizeof(args), &buffer->object);
999 SVM_ERR(svm, "Fault buffer allocation failed: %d", ret);
1003 nvif_object_map(&buffer->object, NULL, 0);
1004 buffer->entries = args.entries;
1005 buffer->getaddr = args.get;
1006 buffer->putaddr = args.put;
1007 INIT_WORK(&buffer->work, nouveau_svm_fault);
1009 ret = nvif_event_ctor(&buffer->object, "svmFault", id, nouveau_svm_event, true, NULL, 0,
1010 &buffer->notify);
1014 buffer->fault = kvcalloc(buffer->entries, sizeof(*buffer->fault), GFP_KERNEL);
1015 if (!buffer->fault)
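
Lines 988-1015 build a buffer in the order the teardown above reverses: create the hardware object, map it, record the ring geometry from the returned args, wire up the work item and the event, and finally allocate the cache. Note that kvcalloc() at line 1014 sizes the array by sizeof(*buffer->fault), i.e. one pointer per ring entry; the fault structs behind those slots are only allocated on first use (line 486). The sizing in userspace form:

    #include <stdlib.h>

    struct fault;

    /* Zeroed pointer slots keep the first-use test at line 480
     * (!buffer->fault[n]) well defined before any fault arrives. */
    static struct fault **alloc_fault_cache(unsigned int entries)
    {
            return calloc(entries, sizeof(struct fault *));
    }
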
1075 SVM_DBG(svm, "No supported fault buffer class");