Lines matching defs: pvc
3503 * vcore *pvc onto the execution of the other vcores described in *cip.
3505 static bool can_piggyback(struct kvmppc_vcore *pvc, struct core_info *cip,
3508 if (cip->total_threads + pvc->num_threads > target_threads)
3511 return can_dynamic_split(pvc, cip);
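
These matches appear to come from the PowerPC KVM host code (the identifiers correspond to arch/powerpc/kvm/book3s_hv.c in the Linux tree); the leading numbers are that file's own line numbers. Lines 3503-3511 sit inside can_piggyback(). The sketch below reconstructs the whole function around the matched lines; the first line of the block comment, the int target_threads parameter, and the early "return false" are filled in from context and should be checked against the actual source.

        /*
         * Work out whether it is possible to piggyback the execution of
         * vcore *pvc onto the execution of the other vcores described in *cip.
         */
        static bool can_piggyback(struct kvmppc_vcore *pvc, struct core_info *cip,
                                  int target_threads)
        {
                /* More threads than the physical core offers? Then it cannot fit. */
                if (cip->total_threads + pvc->num_threads > target_threads)
                        return false;

                /* Otherwise the decision falls to the dynamic split-core check. */
                return can_dynamic_split(pvc, cip);
        }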
3536 struct kvmppc_vcore *pvc, *vcnext;
3539 list_for_each_entry_safe(pvc, vcnext, &lp->list, preempt_list) {
3540 if (!spin_trylock(&pvc->lock))
3542 prepare_threads(pvc);
3543 if (!pvc->n_runnable || !pvc->kvm->arch.mmu_ready) {
3544 list_del_init(&pvc->preempt_list);
3545 if (pvc->runner == NULL) {
3546 pvc->vcore_state = VCORE_INACTIVE;
3547 kvmppc_core_end_stolen(pvc, mftb());
3549 spin_unlock(&pvc->lock);
3552 if (!can_piggyback(pvc, cip, target_threads)) {
3553 spin_unlock(&pvc->lock);
3556 kvmppc_core_end_stolen(pvc, mftb());
3557 pvc->vcore_state = VCORE_PIGGYBACK;
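
Lines 3536-3557 form the body of the loop that scans this CPU's list of preempted vcores and folds each one that fits onto the core being assembled (in the upstream tree this is collect_piggybacks()). A sketch follows; the enclosing function name, the per-CPU list lookup, the list lock, the continue statements, and the early break are assumptions reconstructed from context.

        static void collect_piggybacks(struct core_info *cip, int target_threads)
        {
                struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores);
                struct kvmppc_vcore *pvc, *vcnext;

                spin_lock(&lp->lock);
                list_for_each_entry_safe(pvc, vcnext, &lp->list, preempt_list) {
                        if (!spin_trylock(&pvc->lock))
                                continue;       /* lock contended; skip this vcore */
                        prepare_threads(pvc);
                        if (!pvc->n_runnable || !pvc->kvm->arch.mmu_ready) {
                                /* Nothing can run: take it off the preempted list. */
                                list_del_init(&pvc->preempt_list);
                                if (pvc->runner == NULL) {
                                        pvc->vcore_state = VCORE_INACTIVE;
                                        kvmppc_core_end_stolen(pvc, mftb());
                                }
                                spin_unlock(&pvc->lock);
                                continue;
                        }
                        if (!can_piggyback(pvc, cip, target_threads)) {
                                spin_unlock(&pvc->lock);
                                continue;
                        }
                        /* It fits: stop accounting stolen time and adopt it.
                         * Note that pvc->lock stays held for adopted vcores. */
                        kvmppc_core_end_stolen(pvc, mftb());
                        pvc->vcore_state = VCORE_PIGGYBACK;
                        if (cip->total_threads >= target_threads)
                                break;          /* core is full (assumed) */
                }
                spin_unlock(&lp->lock);
        }

Using spin_trylock() rather than spin_lock() keeps the scan non-blocking: a vcore whose lock is contended is simply skipped instead of stalling the assembly of the whole core.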
3716 struct kvmppc_vcore *pvc;
3796 pvc = core_info.vc[sub];
3798 kvmppc_vcore_preempt(pvc);
3799 spin_unlock(&pvc->lock);
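
Line 3716 declares pvc inside what appears to be kvmppc_run_core(); lines 3796-3799 are its bail-out path, taken when a pending interrupt, reschedule request, or signal is noticed after interrupts have been hard-disabled but before the guest is entered. The trigger condition and the surrounding cleanup in this sketch are assumptions from context.

        local_irq_disable();
        hard_irq_disable();
        if (lazy_irq_pending() || need_resched() ||
            recheck_signals_and_mmu(&core_info)) {
                local_irq_enable();
                vc->vcore_state = VCORE_INACTIVE;
                /* Unlock all except the primary vcore (sub == 0). */
                for (sub = 1; sub < core_info.n_subcores; ++sub) {
                        pvc = core_info.vc[sub];
                        /* Put it back on the preempted-vcores list. */
                        kvmppc_vcore_preempt(pvc);
                        spin_unlock(&pvc->lock);
                }
                /* (waking of sibling threads and final return elided) */
                return;
        }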
3891 pvc = core_info.vc[sub];
3892 pvc->pcpu = pcpu + thr;
3893 for_each_runnable_thread(i, vcpu, pvc) {
3902 kvmppc_start_thread(vcpu, pvc);
3903 kvmppc_update_vpa_dispatch(vcpu, pvc);
3914 kvmppc_start_thread(NULL, pvc);
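
Lines 3891-3914 look like the dispatch loop in the same function: each subcore's vcore is bound to its first physical thread, every runnable vcpu is started on a hardware thread, and thread 0 of the subcore is started with a NULL vcpu if no vcpu claimed it. The loop scaffolding (thr, thr0_done) is reconstructed as an assumption.

        for (sub = 0; sub < core_info.n_subcores; ++sub) {
                /* thr = first hardware thread of this subcore (setup elided) */
                thr0_done = false;
                pvc = core_info.vc[sub];
                pvc->pcpu = pcpu + thr;
                for_each_runnable_thread(i, vcpu, pvc) {
                        /* per-vcpu setup (vcpu->cpu etc.) elided */
                        kvmppc_start_thread(vcpu, pvc);
                        kvmppc_update_vpa_dispatch(vcpu, pvc);
                        if (!vcpu->arch.ptid)
                                thr0_done = true;
                }
                /*
                 * Thread 0 of each subcore must run for the subcore to
                 * enter the guest, so start it with a NULL vcpu if no
                 * runnable vcpu claimed ptid 0.
                 */
                if (!thr0_done)
                        kvmppc_start_thread(NULL, pvc);
        }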
4019 pvc = core_info.vc[sub];
4020 post_guest_process(pvc, pvc == vc);
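
Lines 4019-4020 are the tail of the run: once all threads have left the guest, every subcore's vcore goes through post_guest_process(), with the flag marking the master vcore that drove this run. A minimal sketch, assuming the enclosing loop over n_subcores:

        for (sub = 0; sub < core_info.n_subcores; ++sub) {
                pvc = core_info.vc[sub];
                /* The flag is true only for the master vcore vc. */
                post_guest_process(pvc, pvc == vc);
        }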