Lines Matching defs:work (virt/kvm/async_pf.c)

45 static void async_pf_execute(struct work_struct *work)
48 container_of(work, struct kvm_async_pf, work);
59 * This work is run asynchronously to the task which owns
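
The three matches above are the head of async_pf_execute(), the workqueue callback that faults the guest page in while the vCPU keeps running: container_of() walks back from the embedded work_struct to the enclosing kvm_async_pf, and the comment at line 59 states the key constraint, namely that the worker does not run in the faulting task's context. Below is a minimal sketch of the whole function reconstructed around the matched lines; the get_user_pages_remote()/mmap_read_lock() calls and kvm_vcpu_wake_up() are assumptions based on recent kernels and vary by version, so treat this as an approximation, not a verbatim copy.

static void async_pf_execute(struct work_struct *work)
{
        /* The work_struct is embedded in kvm_async_pf, so container_of()
         * recovers the containing request from the member pointer. */
        struct kvm_async_pf *apf =
                container_of(work, struct kvm_async_pf, work);
        struct mm_struct *mm = apf->mm;
        struct kvm_vcpu *vcpu = apf->vcpu;
        unsigned long addr = apf->addr;
        int locked = 1;

        might_sleep();

        /*
         * This work is run asynchronously to the task which owns mm, so
         * the page is faulted in with a remote GUP call instead of a
         * plain current->mm access (exact signature varies by version).
         */
        mmap_read_lock(mm);
        get_user_pages_remote(mm, addr, 1, FOLL_WRITE, NULL, NULL, &locked);
        if (locked)
                mmap_read_unlock(mm);

        /* Publish the completion; clearing apf->vcpu tells the cancel
         * path that this worker already ran. apf may be freed by the
         * vCPU as soon as the lock is dropped, so it is not touched
         * again below. */
        spin_lock(&vcpu->async_pf.lock);
        list_add_tail(&apf->link, &vcpu->async_pf.done);
        apf->vcpu = NULL;
        spin_unlock(&vcpu->async_pf.lock);

        kvm_vcpu_wake_up(vcpu);

        /* Drop the references taken by kvm_setup_async_pf(). */
        mmput(mm);
        kvm_put_kvm(vcpu->kvm);
}
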
98 /* cancel outstanding work queue item */
100 struct kvm_async_pf *work =
102 typeof(*work), queue);
103 list_del(&work->queue);
109 if (!work->vcpu)
114 flush_work(&work->work);
116 if (cancel_work_sync(&work->work)) {
117 mmput(work->mm);
118 kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
119 kmem_cache_free(async_pf_cache, work);
126 struct kvm_async_pf *work =
128 typeof(*work), link);
129 list_del(&work->link);
130 kmem_cache_free(async_pf_cache, work);
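
Lines 98 through 130 all fall inside kvm_clear_async_pf_completion_queue(), the teardown path run when a vCPU is reset or destroyed. The first loop drains the pending queue: under CONFIG_KVM_ASYNC_PF_SYNC the work is flushed to completion, otherwise cancel_work_sync() tries to cancel it, and only when the work was still pending (the callback never ran) does this path drop the mm and kvm references on the worker's behalf. The second loop frees everything that already completed. A sketch assembled from the matched lines; the lock placement and loop structure outside those lines are inferred from context.

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
{
        spin_lock(&vcpu->async_pf.lock);

        /* cancel outstanding work queue item */
        while (!list_empty(&vcpu->async_pf.queue)) {
                struct kvm_async_pf *work =
                        list_first_entry(&vcpu->async_pf.queue,
                                         typeof(*work), queue);
                list_del(&work->queue);

                /* work->vcpu == NULL means async_pf_execute() already
                 * finished and moved the item to the done list; the
                 * second loop below will free it. */
                if (!work->vcpu)
                        continue;

                spin_unlock(&vcpu->async_pf.lock);
#ifdef CONFIG_KVM_ASYNC_PF_SYNC
                flush_work(&work->work);
#else
                /* cancel_work_sync() returns true only if the work was
                 * still pending, i.e. the callback never ran, so the
                 * references it would have dropped are dropped here. */
                if (cancel_work_sync(&work->work)) {
                        mmput(work->mm);
                        kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
                        kmem_cache_free(async_pf_cache, work);
                }
#endif
                spin_lock(&vcpu->async_pf.lock);
        }

        /* free everything that already completed */
        while (!list_empty(&vcpu->async_pf.done)) {
                struct kvm_async_pf *work =
                        list_first_entry(&vcpu->async_pf.done,
                                         typeof(*work), link);
                list_del(&work->link);
                kmem_cache_free(async_pf_cache, work);
        }
        spin_unlock(&vcpu->async_pf.lock);

        vcpu->async_pf.queued = 0;
}
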
139 struct kvm_async_pf *work;
144 work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
146 list_del(&work->link);
149 kvm_arch_async_page_ready(vcpu, work);
151 kvm_arch_async_page_present(vcpu, work);
153 list_del(&work->queue);
155 kmem_cache_free(async_pf_cache, work);
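
Lines 139 to 155 are the body of kvm_check_async_pf_completion(), which the vCPU run loop uses to drain completed requests: each one is unlinked from the done list under the lock, handed to the arch code to prefault the page and to inject the "page present" notification, then removed from the pending queue and freed. A sketch around the matched lines; the loop condition built from list_empty_careful() and kvm_arch_can_dequeue_async_page_present() is an assumption from recent kernels, and older trees name the gating hook differently.

void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
{
        struct kvm_async_pf *work;

        while (!list_empty_careful(&vcpu->async_pf.done) &&
               kvm_arch_can_dequeue_async_page_present(vcpu)) {
                spin_lock(&vcpu->async_pf.lock);
                work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
                                        link);
                list_del(&work->link);
                spin_unlock(&vcpu->async_pf.lock);

                /* Let the arch prefault the page into its page tables,
                 * then tell the guest the page is now present. */
                kvm_arch_async_page_ready(vcpu, work);
                kvm_arch_async_page_present(vcpu, work);

                list_del(&work->queue);
                vcpu->async_pf.queued--;
                kmem_cache_free(async_pf_cache, work);
        }
}
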
166 struct kvm_async_pf *work;
179 work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
180 if (!work)
183 work->wakeup_all = false;
184 work->vcpu = vcpu;
185 work->cr2_or_gpa = cr2_or_gpa;
186 work->addr = hva;
187 work->arch = *arch;
188 work->mm = current->mm;
189 mmget(work->mm);
190 kvm_get_kvm(work->vcpu->kvm);
192 INIT_WORK(&work->work, async_pf_execute);
194 list_add_tail(&work->queue, &vcpu->async_pf.queue);
196 work->notpresent_injected = kvm_arch_async_page_not_present(vcpu, work);
198 schedule_work(&work->work);
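
Lines 166 through 198 cover the heart of kvm_setup_async_pf(): a no-sleep allocation with GFP_NOWAIT | __GFP_NOWARN (on failure the caller simply takes the fault synchronously), reference grabs on the faulting mm and the VM so both outlive the worker, injection of the "page not present" event into the guest, and schedule_work() to kick async_pf_execute(). A sketch filled in around the matched lines; the ASYNC_PF_PER_VCPU depth check, the queued++ bookkeeping, and the return values are inferred from context.

bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
                        unsigned long hva, struct kvm_arch_async_pf *arch)
{
        struct kvm_async_pf *work;

        if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
                return false;

        /* Allocate without sleeping: if memory is tight, falling back
         * to a synchronous fault is cheaper than waiting here. */
        work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
        if (!work)
                return false;

        work->wakeup_all = false;
        work->vcpu = vcpu;
        work->cr2_or_gpa = cr2_or_gpa;
        work->addr = hva;
        work->arch = *arch;

        /* Pin the mm and the VM; async_pf_execute() (or the cancel
         * path) drops these with mmput()/kvm_put_kvm(). */
        work->mm = current->mm;
        mmget(work->mm);
        kvm_get_kvm(work->vcpu->kvm);

        INIT_WORK(&work->work, async_pf_execute);

        list_add_tail(&work->queue, &vcpu->async_pf.queue);
        vcpu->async_pf.queued++;

        /* Record whether the "not present" event actually reached the
         * guest, so later paths know if a "present" event is owed. */
        work->notpresent_injected = kvm_arch_async_page_not_present(vcpu, work);

        schedule_work(&work->work);
        return true;
}
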
205 struct kvm_async_pf *work;
211 work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
212 if (!work)
215 work->wakeup_all = true;
216 INIT_LIST_HEAD(&work->queue); /* for list_del to work */
220 list_add_tail(&work->link, &vcpu->async_pf.done);
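
The final matches are from kvm_async_pf_wakeup_all(), which queues a synthetic "wakeup all" completion straight onto the done list so the guest is told to retry every outstanding fault; it allocates with GFP_ATOMIC because it can be called from contexts that must not sleep. The INIT_LIST_HEAD on work->queue exists only so that the generic completion path's list_del(&work->queue) is safe on an entry that was never on the real pending queue. A sketch with the early-out, locking, and return values inferred from context.

int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
{
        struct kvm_async_pf *work;

        /* Nothing to do if a completion is already waiting to be
         * delivered; the guest will be woken for that one. */
        if (!list_empty_careful(&vcpu->async_pf.done))
                return 0;

        /* May be called with interrupts disabled, hence GFP_ATOMIC. */
        work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
        if (!work)
                return -ENOMEM;

        work->wakeup_all = true;
        INIT_LIST_HEAD(&work->queue); /* for list_del to work */

        spin_lock(&vcpu->async_pf.lock);
        list_add_tail(&work->link, &vcpu->async_pf.done);
        spin_unlock(&vcpu->async_pf.lock);

        vcpu->async_pf.queued++;
        return 0;
}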