/kernel/linux/linux-5.10/tools/perf/scripts/python/compaction-times.py
     72  self.migrated = pair(0, 0, "moved", "failed")
     78  self.migrated += rhs.migrated
     87  s += "migration: %s" % self.migrated
     99  def increment(self, migrated, fscan, mscan):
    100  if (migrated != None):
    101  self.migrated += migrated
    132  def increment_pending(cls, pid, migrated, fscan, mscan):
    136  head.do_increment(migrated, fsca... [further matches truncated]
/kernel/linux/linux-6.6/tools/perf/scripts/python/compaction-times.py
     72  self.migrated = pair(0, 0, "moved", "failed")
     78  self.migrated += rhs.migrated
     87  s += "migration: %s" % self.migrated
     99  def increment(self, migrated, fscan, mscan):
    100  if (migrated != None):
    101  self.migrated += migrated
    132  def increment_pending(cls, pid, migrated, fscan, mscan):
    136  head.do_increment(migrated, fsca... [further matches truncated]
/kernel/linux/linux-5.10/arch/xtensa/include/asm/mmu_context.h  (all hits in switch_mm())
    129  int migrated = next->context.cpu != cpu;    [local]
    130  /* Flush the icache if we migrated to a new core. */
    131  if (migrated) {
    135  if (migrated || prev != next)
/kernel/linux/linux-6.6/arch/xtensa/include/asm/mmu_context.h  (all hits in switch_mm())
    130  int migrated = next->context.cpu != cpu;    [local]
    131  /* Flush the icache if we migrated to a new core. */
    132  if (migrated) {
    136  if (migrated || prev != next)
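
The xtensa hits above show the usual "flush only if we moved cores" pattern in switch_mm(): the context remembers which CPU it last ran on, and the icache is flushed only when that differs from the current CPU. A minimal user-space sketch of the same decision follows; the struct, flush_icache() and activate_ctx() names are stand-ins for illustration, not the kernel's API.

    #include <stdbool.h>
    #include <stdio.h>

    struct mm_ctx {
        int cpu;            /* CPU this context last ran on, -1 if it never ran */
    };

    static void flush_icache(void) { puts("icache flushed"); }
    static void activate_ctx(struct mm_ctx *ctx, int cpu) { ctx->cpu = cpu; }

    /* Switch to 'next' on CPU 'cpu'; 'prev' is the context that was active before. */
    static void switch_ctx(struct mm_ctx *prev, struct mm_ctx *next, int cpu)
    {
        bool migrated = next->cpu != cpu;   /* has this context moved cores? */

        if (migrated)
            flush_icache();                 /* stale instructions may be cached */

        if (migrated || prev != next)
            activate_ctx(next, cpu);        /* (re)install the context on this CPU */
    }

    int main(void)
    {
        struct mm_ctx a = { .cpu = -1 }, b = { .cpu = -1 };

        switch_ctx(&a, &b, 0);   /* first run on CPU 0: counts as migrated */
        switch_ctx(&b, &b, 0);   /* same context, same CPU: nothing to do */
        switch_ctx(&b, &b, 1);   /* same context, new CPU: flush again */
        return 0;
    }
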
/kernel/linux/linux-5.10/fs/nfsd/export.c
    421  int migrated, i, err;    [local, in fsloc_parse()]
    461  /* migrated */    [in fsloc_parse()]
    462  err = get_int(mesg, &migrated);    [in fsloc_parse()]
    466  if (migrated < 0 || migrated > 1)    [in fsloc_parse()]
    468  fsloc->migrated = migrated;    [in fsloc_parse()]
    741  new->ex_fslocs.migrated = 0;    [in svc_export_init()]
    765  new->ex_fslocs.migrated = item->ex_fslocs.migrated;    [in export_update()]
    ... [further matches truncated]
/kernel/linux/linux-5.10/fs/nfsd/export.h
     31  * list of replicas that do serve it) then we set "migrated": */
     32  int migrated;    [struct member]
/kernel/linux/linux-6.6/fs/nfsd/export.c
    450  int migrated, i, err;    [local, in fsloc_parse()]
    490  /* migrated */    [in fsloc_parse()]
    491  err = get_int(mesg, &migrated);    [in fsloc_parse()]
    495  if (migrated < 0 || migrated > 1)    [in fsloc_parse()]
    497  fsloc->migrated = migrated;    [in fsloc_parse()]
    818  new->ex_fslocs.migrated = 0;    [in svc_export_init()]
    843  new->ex_fslocs.migrated = item->ex_fslocs.migrated;    [in export_update()]
    ... [further matches truncated]
/kernel/linux/linux-6.6/fs/nfsd/export.h
     32  * list of replicas that do serve it) then we set "migrated": */
     33  int migrated;    [struct member]
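
Both nfsd versions parse the fslocations "migrated" flag the same way: read one integer from the control message and reject anything other than 0 or 1 before storing it in the export's fslocs. Below is a small stand-alone sketch of that validation; parse_int() stands in for the kernel's get_int() helper, and struct fsloc is reduced to the one field of interest.

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct fsloc {
        int migrated;   /* 1: the export has moved and the list names its new homes */
    };

    /* Pull one whitespace-delimited integer out of *mesg (stand-in for get_int()). */
    static int parse_int(char **mesg, int *val)
    {
        char *end;
        long v = strtol(*mesg, &end, 10);

        if (end == *mesg)
            return -EINVAL;
        *mesg = end;
        *val = (int)v;
        return 0;
    }

    static int fsloc_parse_migrated(char **mesg, struct fsloc *fsloc)
    {
        int migrated, err;

        err = parse_int(mesg, &migrated);
        if (err)
            return err;
        if (migrated < 0 || migrated > 1)   /* the flag must be exactly 0 or 1 */
            return -EINVAL;
        fsloc->migrated = migrated;
        return 0;
    }

    int main(void)
    {
        struct fsloc loc = { 0 };
        char buf[] = " 1";
        char *p = buf;
        int rc = fsloc_parse_migrated(&p, &loc);

        printf("rc=%d migrated=%d\n", rc, loc.migrated);
        return 0;
    }
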
/kernel/linux/linux-5.10/drivers/infiniband/hw/hfi1/ruc.c  (all hits in hfi1_ruc_check_hdr())
     78  bool migrated = packet->migrated;    [local]
     81  if (qp->s_mig_state == IB_MIG_ARMED && migrated) {
    151  if (qp->s_mig_state == IB_MIG_REARM && !migrated)
/kernel/linux/linux-5.10/drivers/infiniband/hw/hfi1/driver.c
   1561  packet->migrated = ib_bth_is_migration(packet->ohdr);    [in hfi1_setup_9B_packet()]
   1598  packet->migrated = opa_bth_is_migration(packet->ohdr);    [in hfi1_setup_bypass_packet()]
   1610  packet->migrated = opa_bth_is_migration(packet->ohdr);    [in hfi1_setup_bypass_packet()]
   1624  packet->migrated = false;    [in hfi1_setup_bypass_packet()]
/kernel/linux/linux-6.6/drivers/infiniband/hw/hfi1/ruc.c  (all hits in hfi1_ruc_check_hdr())
     36  bool migrated = packet->migrated;    [local]
     39  if (qp->s_mig_state == IB_MIG_ARMED && migrated) {
    109  if (qp->s_mig_state == IB_MIG_REARM && !migrated)
/kernel/linux/linux-6.6/drivers/infiniband/hw/hfi1/driver.c
   1514  packet->migrated = ib_bth_is_migration(packet->ohdr);    [in hfi1_setup_9B_packet()]
   1551  packet->migrated = opa_bth_is_migration(packet->ohdr);    [in hfi1_setup_bypass_packet()]
   1563  packet->migrated = opa_bth_is_migration(packet->ohdr);    [in hfi1_setup_bypass_packet()]
   1577  packet->migrated = false;    [in hfi1_setup_bypass_packet()]
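
In both hfi1 trees the receive setup code (driver.c) decodes a per-packet migration bit from the transport header into packet->migrated, and the header check in ruc.c later compares that bool against the QP's migration state (IB_MIG_ARMED / IB_MIG_REARM). The sketch below models just the decode step; the bit position, struct layout and function names are assumptions for illustration, not the driver's definitions.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BTH_MIG_BIT  (1u << 22)   /* assumed position of the "migration requested" bit */

    struct packet {
        uint32_t bth0;                /* first base-transport-header word, host byte order */
        bool migrated;
    };

    static bool bth_is_migration(uint32_t bth0)
    {
        return (bth0 & BTH_MIG_BIT) != 0;
    }

    static void setup_packet(struct packet *pkt)
    {
        /* Decode the header bit once at setup time, so the later QP-state
         * checks (as in ruc.c above) are a plain boolean test. */
        pkt->migrated = bth_is_migration(pkt->bth0);
    }

    int main(void)
    {
        struct packet p = { .bth0 = BTH_MIG_BIT };

        setup_packet(&p);
        printf("migrated=%d\n", p.migrated);
        return 0;
    }
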
/kernel/linux/linux-5.10/fs/xfs/xfs_mru_cache.c
     65  * this happens, all the elements on the LRU list need to be migrated to the end
    118  * Case a) above is detected by counting how many groups are migrated and
    120  * time_zero field, which is updated as each group is migrated.
    131  unsigned int migrated = 0;    [local, in _xfs_mru_cache_migrate()]
    158  * lists have been migrated to the reap list, it's now empty.    [in _xfs_mru_cache_migrate()]
    160  if (++migrated == mru->grp_count) {    [in _xfs_mru_cache_migrate()]
    185  * MRU list. The lists must be migrated first to ensure that they're
    219  * When destroying or reaping, all the elements that were migrated to the reap
/kernel/linux/linux-6.6/fs/xfs/xfs_mru_cache.c
     65  * this happens, all the elements on the LRU list need to be migrated to the end
    118  * Case a) above is detected by counting how many groups are migrated and
    120  * time_zero field, which is updated as each group is migrated.
    131  unsigned int migrated = 0;    [local, in _xfs_mru_cache_migrate()]
    158  * lists have been migrated to the reap list, it's now empty.    [in _xfs_mru_cache_migrate()]
    160  if (++migrated == mru->grp_count) {    [in _xfs_mru_cache_migrate()]
    185  * MRU list. The lists must be migrated first to ensure that they're
    219  * When destroying or reaping, all the elements that were migrated to the reap
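
The xfs_mru_cache.c comments explain the mechanism the code hit at line 160 implements: during a migration pass each expired time group is moved to the reap list, time_zero advances as each group goes, and once the number of migrated groups reaches grp_count the whole cache is known to be empty. A toy model of that accounting, with illustrative field names, follows.

    #include <stdbool.h>
    #include <stdio.h>

    struct mru {
        unsigned int grp_count;     /* number of time groups */
        unsigned long time_zero;    /* start time of the oldest group */
        unsigned long grp_time;     /* width of one group, in ticks */
    };

    /* Migrate every group that expired before 'now'; return true if the whole
     * cache is now empty (every group was migrated to the reap list). */
    static bool mru_migrate(struct mru *mru, unsigned long now)
    {
        unsigned int migrated = 0;

        while (mru->time_zero + mru->grp_time <= now) {
            /* ... move this group's elements onto the reap list ... */
            mru->time_zero += mru->grp_time;      /* advance as each group goes */
            if (++migrated == mru->grp_count)
                return true;                      /* all lists migrated: cache empty */
        }
        return false;
    }

    int main(void)
    {
        struct mru m = { .grp_count = 4, .time_zero = 0, .grp_time = 10 };

        printf("empty=%d\n", mru_migrate(&m, 100));   /* far future: everything expires */
        return 0;
    }
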
/kernel/linux/linux-5.10/arch/mips/kvm/vz.c
   2502  bool migrated;    [local, in kvm_vz_vcpu_load_tlb()]
   2508  migrated = (vcpu->arch.last_exec_cpu != cpu);    [in kvm_vz_vcpu_load_tlb()]
   2525  if (migrated ||    [in kvm_vz_vcpu_load_tlb()]
   2544  if (migrated || last_exec_vcpu[cpu] != vcpu)    [in kvm_vz_vcpu_load_tlb()]
   2562  bool migrated, all;    [local, in kvm_vz_vcpu_load()]
   2565  * Have we migrated to a different CPU?    [in kvm_vz_vcpu_load()]
   2568  migrated = (vcpu->arch.last_sched_cpu != cpu);    [in kvm_vz_vcpu_load()]
   2574  all = migrated || (last_vcpu[cpu] != vcpu);    [in kvm_vz_vcpu_load()]
/kernel/linux/linux-6.6/arch/mips/kvm/vz.c
   2506  bool migrated;    [local, in kvm_vz_vcpu_load_tlb()]
   2512  migrated = (vcpu->arch.last_exec_cpu != cpu);    [in kvm_vz_vcpu_load_tlb()]
   2529  if (migrated ||    [in kvm_vz_vcpu_load_tlb()]
   2548  if (migrated || last_exec_vcpu[cpu] != vcpu)    [in kvm_vz_vcpu_load_tlb()]
   2566  bool migrated, all;    [local, in kvm_vz_vcpu_load()]
   2569  * Have we migrated to a different CPU?    [in kvm_vz_vcpu_load()]
   2572  migrated = (vcpu->arch.last_sched_cpu != cpu);    [in kvm_vz_vcpu_load()]
   2578  all = migrated || (last_vcpu[cpu] != vcpu);    [in kvm_vz_vcpu_load()]
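
Both vz.c versions use the same idiom in kvm_vz_vcpu_load() and kvm_vz_vcpu_load_tlb(): compare the CPU the vCPU last ran on with the current one, and restore all cached per-CPU guest state when it migrated or when another vCPU used this CPU in between. A simplified stand-alone model of that check (field names illustrative) is shown here.

    #include <stdbool.h>
    #include <stdio.h>

    #define NR_CPUS 4

    struct vcpu {
        int id;
        int last_sched_cpu;     /* pCPU this vCPU last ran on, -1 initially */
    };

    static struct vcpu *last_vcpu[NR_CPUS];   /* which vCPU last used each pCPU */

    static void vcpu_load(struct vcpu *vcpu, int cpu)
    {
        bool migrated = vcpu->last_sched_cpu != cpu;
        /* Restore everything if we moved CPUs or someone else ran here since. */
        bool all = migrated || last_vcpu[cpu] != vcpu;

        printf("vcpu%d on cpu%d: migrated=%d restore_all=%d\n",
               vcpu->id, cpu, migrated, all);

        last_vcpu[cpu] = vcpu;
        vcpu->last_sched_cpu = cpu;
    }

    int main(void)
    {
        struct vcpu a = { .id = 0, .last_sched_cpu = -1 };

        vcpu_load(&a, 0);   /* first load: treated as migrated */
        vcpu_load(&a, 0);   /* same CPU, same vCPU: cheap reload */
        vcpu_load(&a, 1);   /* moved to CPU 1: full restore */
        return 0;
    }
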
/kernel/linux/linux-5.10/arch/loongarch/kvm/loongarch.c
    489  bool migrated;    [local, in _kvm_update_vmid()]
    497  migrated = (vcpu->arch.last_exec_cpu != cpu);    [in _kvm_update_vmid()]
    508  if (migrated ||    [in _kvm_update_vmid()]
    818  * Must be called when the VCPU is migrated to a different CPU to ensure that
    832  bool migrated, all;    [local, in _kvm_vcpu_load()]
    835  * Have we migrated to a different CPU?    [in _kvm_vcpu_load()]
    838  migrated = (vcpu->arch.last_sched_cpu != cpu);    [in _kvm_vcpu_load()]
    845  all = migrated || (context->last_vcpu != vcpu);    [in _kvm_vcpu_load()]
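
The LoongArch hits mirror the MIPS ones for _kvm_vcpu_load(), and add _kvm_update_vmid(), where migration to a different CPU (or a stale cached VMID) forces the vCPU to take a fresh VMID before guest entry. The sketch below is a rough model of that idea only; the per-CPU generation counter and field names are invented for illustration and do not reflect the kernel's actual VMID scheme.

    #include <stdbool.h>
    #include <stdio.h>

    #define NR_CPUS 4

    struct vcpu {
        int last_exec_cpu;              /* CPU this vCPU last entered the guest on */
        unsigned long vmid[NR_CPUS];    /* cached VMID generation per CPU */
    };

    static unsigned long cpu_vmid_gen[NR_CPUS] = { 1, 1, 1, 1 };

    static void update_vmid(struct vcpu *vcpu, int cpu)
    {
        bool migrated = vcpu->last_exec_cpu != cpu;

        if (migrated || vcpu->vmid[cpu] != cpu_vmid_gen[cpu]) {
            /* Take a fresh VMID on this CPU; a real implementation would also
             * flush the guest TLB when the counter wraps around. */
            vcpu->vmid[cpu] = ++cpu_vmid_gen[cpu];
            printf("cpu%d: new vmid generation %lu\n", cpu, vcpu->vmid[cpu]);
        }
        vcpu->last_exec_cpu = cpu;
    }

    int main(void)
    {
        struct vcpu v = { .last_exec_cpu = -1 };

        update_vmid(&v, 0);   /* first entry: allocate */
        update_vmid(&v, 0);   /* same CPU, generation still current: keep it */
        update_vmid(&v, 1);   /* migrated: allocate on the new CPU */
        return 0;
    }
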
/kernel/linux/linux-6.6/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
   1867  * @event: mmu notifier event when range is evicted or migrated
   3374  * @migrated: output, true if migration is triggered
   3397  bool *migrated)    [in svm_range_trigger_migration()]
   3402  *migrated = false;    [in svm_range_trigger_migration()]
   3412  *migrated = !r;    [in svm_range_trigger_migration()]
   3417  *migrated = !r;    [in svm_range_trigger_migration()]
   3570  bool migrated;    [local, in svm_range_set_attr()]
   3574  r = svm_range_trigger_migration(mm, prange, &migrated);    [in svm_range_set_attr()]
   3578  if (migrated && (!p->xnack_enabled ||    [in svm_range_set_attr()]
   3586  if (!migrated ...    [in svm_range_set_attr()]
   3396  svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange, bool *migrated)    [argument declaration]
    ... [further matches truncated]
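
The kfd_svm.c hits show svm_range_trigger_migration() reporting success through a bool out-parameter: it is cleared up front and set to !r only after a migration attempt, and svm_range_set_attr() then branches on it. The following sketch reproduces just that calling convention with a stubbed migration step; do_migrate() and the plain int "range" handle are placeholders.

    #include <stdbool.h>
    #include <stdio.h>

    static int do_migrate(int range)
    {
        (void)range;
        return 0;                       /* 0 = success, like kernel error codes */
    }

    static int trigger_migration(int range, bool *migrated)
    {
        int r;

        *migrated = false;              /* default: nothing moved */
        r = do_migrate(range);
        *migrated = !r;                 /* only true if the migration succeeded */
        return r;
    }

    int main(void)
    {
        bool migrated;
        int r = trigger_migration(42, &migrated);

        if (!r && migrated)
            puts("range moved: remap before use");
        return 0;
    }
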
/kernel/linux/linux-5.10/arch/arm/vfp/vfphw.S
    130  @ However, it may have been migrated to another CPU, in which
/kernel/linux/linux-6.6/fs/f2fs/gc.c  (all hits in do_garbage_collect())
   1682  int seg_freed = 0, migrated = 0;    [local]
   1737  migrated >= sbi->migration_granularity)
   1769  migrated++;
   1788  if (migrated)
/kernel/linux/linux-5.10/fs/f2fs/gc.c  (all hits in do_garbage_collect())
   1588  int seg_freed = 0, migrated = 0;    [local]
   1642  migrated >= sbi->migration_granularity)
   1672  migrated++;
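
In both f2fs trees, do_garbage_collect() counts the segments it has migrated in the current pass and bails out once that count reaches sbi->migration_granularity, bounding how much data a single GC invocation can move. A stand-alone sketch of that loop follows; gc_one_segment() and the segment range are illustrative.

    #include <stdio.h>

    struct sbi { unsigned int migration_granularity; };

    static int gc_one_segment(unsigned int segno)
    {
        printf("migrating segment %u\n", segno);
        return 0;   /* 0 = success */
    }

    static unsigned int do_garbage_collect(struct sbi *sbi,
                                           unsigned int start, unsigned int end)
    {
        unsigned int segno, migrated = 0;

        for (segno = start; segno < end; segno++) {
            /* Stop early once this pass has moved enough segments. */
            if (migrated >= sbi->migration_granularity)
                break;
            if (gc_one_segment(segno))
                continue;
            migrated++;
        }
        return migrated;
    }

    int main(void)
    {
        struct sbi sbi = { .migration_granularity = 2 };

        printf("migrated %u segments\n", do_garbage_collect(&sbi, 0, 8));
        return 0;
    }
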
/kernel/linux/linux-5.10/mm/huge_memory.c  (all hits in do_huge_pmd_numa_page())
   1420  bool migrated = false;    [local]
   1534  migrated = migrate_misplaced_transhuge_page(vma->vm_mm, vma,
   1536  if (migrated) {
/kernel/linux/linux-6.6/mm/huge_memory.c  (all hits in do_huge_pmd_numa_page())
   1497  bool migrated = false, writable = false;    [local]
   1543  migrated = migrate_misplaced_page(page, vma, target_nid);
   1544  if (migrated) {
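
The huge_memory.c hits are the NUMA hinting fault path: do_huge_pmd_numa_page() tries to move the faulting huge page to the node that accessed it (migrate_misplaced_transhuge_page() in 5.10, migrate_misplaced_page() in 6.6) and only updates its bookkeeping when the helper reports success. Below is a much-simplified model of that success/failure branching; struct page and the migration helper are stand-ins, not the mm APIs.

    #include <stdbool.h>
    #include <stdio.h>

    struct page { int nid; };

    /* Pretend to migrate; report success with 'true' like the kernel helpers. */
    static bool migrate_misplaced(struct page *page, int target_nid)
    {
        page->nid = target_nid;
        return true;
    }

    static void numa_hinting_fault(struct page *page, int target_nid)
    {
        bool migrated = false;

        if (target_nid >= 0 && target_nid != page->nid)
            migrated = migrate_misplaced(page, target_nid);

        if (migrated)
            printf("page now on node %d\n", page->nid);
        else
            printf("page stayed on node %d\n", page->nid);
    }

    int main(void)
    {
        struct page p = { .nid = 0 };

        numa_hinting_fault(&p, 1);
        return 0;
    }
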
/kernel/linux/linux-5.10/tools/perf/builtin-sched.c
   2448  struct thread *migrated)    [in timehist_print_migration_event()]
   2467  timehist_skip_sample(sched, migrated, evsel, sample)) {    [in timehist_print_migration_event()]
   2491  printf("migrated: %s", timehist_get_commstr(migrated));    [in timehist_print_migration_event()]
   2506  /* want pid of migrated task not pid in sample */    [in timehist_migrate_task_event()]
   2444  timehist_print_migration_event(struct perf_sched *sched, struct evsel *evsel, struct perf_sample *sample, struct machine *machine, struct thread *migrated)    [argument declaration]
/kernel/linux/linux-6.6/tools/perf/builtin-sched.c
   2503  struct thread *migrated)    [in timehist_print_migration_event()]
   2522  timehist_skip_sample(sched, migrated, evsel, sample)) {    [in timehist_print_migration_event()]
   2546  printf("migrated: %s", timehist_get_commstr(migrated));    [in timehist_print_migration_event()]
   2561  /* want pid of migrated task not pid in sample */    [in timehist_migrate_task_event()]
   2499  timehist_print_migration_event(struct perf_sched *sched, struct evsel *evsel, struct perf_sample *sample, struct machine *machine, struct thread *migrated)    [argument declaration]
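
Finally, perf's builtin-sched.c prints timehist migration events: the handler resolves the thread that was migrated (not necessarily the thread in the sample, per the comment at the timehist_migrate_task_event() hit) and prints a "migrated: <comm>" column for the event. A minimal stand-alone sketch of such a print-out, with simplified stand-in types rather than perf's own, is shown below.

    #include <stdio.h>

    struct thread {
        const char *comm;
        int pid;
    };

    struct sample {
        unsigned long long time;
        int cpu;
    };

    static void print_migration_event(const struct sample *sample,
                                      const struct thread *migrated)
    {
        /* timestamp and cpu first, then which task was moved */
        printf("%llu [%03d] migrated: %s[%d]\n",
               sample->time, sample->cpu, migrated->comm, migrated->pid);
    }

    int main(void)
    {
        struct thread t = { .comm = "kworker/0:1", .pid = 1234 };
        struct sample s = { .time = 123456789ULL, .cpu = 0 };

        print_migration_event(&s, &t);
        return 0;
    }
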