Searched refs:mm_cid (Results 1 - 12 of 12) sorted by relevance
/kernel/linux/linux-6.6/include/trace/events/
rseq.h
    20  __field(s32, mm_cid)
    26  __entry->mm_cid = task_mm_cid(t);
    29  TP_printk("cpu_id=%d node_id=%d mm_cid=%d", __entry->cpu_id,
    30          __entry->node_id, __entry->mm_cid)
/kernel/linux/linux-6.6/kernel/
rseq.c
    93   u32 mm_cid = task_mm_cid(t);  in rseq_update_cpu_node_id() local
    95   WARN_ON_ONCE((int) mm_cid < 0);  in rseq_update_cpu_node_id()
    101  unsafe_put_user(mm_cid, &rseq->mm_cid, efault_end);  in rseq_update_cpu_node_id()
    120  mm_cid = 0;  in rseq_reset_rseq_cpu_node_id() local
    140  * Reset mm_cid to its initial state (0).  in rseq_reset_rseq_cpu_node_id()
    142  if (put_user(mm_cid, &t->rseq->mm_cid))  in rseq_reset_rseq_cpu_node_id()
fork.c
    1209  tsk->mm_cid = -1;  in dup_task_struct()
/kernel/linux/linux-6.6/include/linux/
mm_types.h
    667   struct mm_cid {  struct
    736   * Keep track of the currently allocated mm_cid for each cpu.
    737   * The per-cpu mm_cid values are serialized by their respective
    740   struct mm_cid __percpu *pcpu_cid;
    742   * @mm_cid_next_scan: Next mm_cid scan (in jiffies).
    744   * When the next mm_cid scan is due (in jiffies).
    1088  struct mm_cid *pcpu_cid = per_cpu_ptr(mm->pcpu_cid, i);  in mm_init_cid()
    1098  mm->pcpu_cid = alloc_percpu(struct mm_cid);  in mm_alloc_cid()
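For context, the mm_types.h hits fit together roughly as sketched below. This is an abridged, from-memory sketch rather than a verbatim excerpt; include/linux/mm_types.h is authoritative for field order, types and locking rules.

/* Abridged sketch of the per-mm concurrency-ID state (illustrative only). */
struct mm_cid {
	u64 time;	/* rq clock snapshot, consulted by the periodic "clear old cid" scan */
	int cid;	/* concurrency ID currently held by this CPU, or MM_CID_UNSET */
};

struct mm_struct {
	/* ... */
	struct mm_cid __percpu *pcpu_cid;	/* one entry per possible CPU (alloc_percpu) */
	unsigned long mm_cid_next_scan;		/* when the next mm_cid scan is due, in jiffies */
	/* ... */
};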
sched.h
    1437  int mm_cid;  /* Current cid in mm */  member
mm.h
    2326  return t->mm_cid;  in task_mm_cid()
/kernel/linux/linux-6.6/include/uapi/linux/
rseq.h
    143  * Restartable sequences mm_cid field. Updated by the kernel. Read by
    149  __u32 mm_cid;  member
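mm_cid is one of the kernel-updated fields near the end of the userspace-visible rseq area. As a rough, abridged sketch (from memory; include/uapi/linux/rseq.h is authoritative for the exact layout and comments):

struct rseq {
	__u32 cpu_id_start;	/* CPU number hint; always a valid CPU, may be stale outside a critical section */
	__u32 cpu_id;		/* current CPU number, updated by the kernel */
	__u64 rseq_cs;		/* address of the active critical-section descriptor */
	__u32 flags;
	__u32 node_id;		/* NUMA node ID, updated by the kernel */
	__u32 mm_cid;		/* concurrency ID within the mm, updated by the kernel */
	char end[];		/* marks the extensible end of the registered area */
} __attribute__((aligned(4 * sizeof(__u64))));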
/kernel/linux/linux-6.6/tools/testing/selftests/rseq/
rseq-bits-template.h
    20  # define RSEQ_TEMPLATE_CPU_ID_FIELD mm_cid
rseq-abi.h
    159  * Restartable sequences mm_cid field. Updated by the kernel. Read by
    165  __u32 mm_cid;  member
rseq.h
    210  return (int) rseq_feature_size >= rseq_offsetofend(struct rseq_abi, mm_cid);  in rseq_mm_cid_available()
    215  return RSEQ_ACCESS_ONCE(rseq_get_abi()->mm_cid);  in rseq_current_mm_cid()
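The two selftest helpers above are enough for a minimal userspace check. A small sketch, assuming it is compiled together with tools/testing/selftests/rseq/rseq.c and its rseq.h (rseq_register_current_thread() also comes from that harness, not from libc; build flags follow the selftest Makefile):

#include <stdio.h>
#include "rseq.h"

int main(void)
{
	/* Register (or adopt) the rseq area for this thread. */
	if (rseq_register_current_thread()) {
		perror("rseq_register_current_thread");
		return 1;
	}
	/* The mm_cid field only exists if the kernel reports a large enough feature size. */
	if (!rseq_mm_cid_available()) {
		fprintf(stderr, "mm_cid not provided by this kernel\n");
		return 1;
	}
	/* Concurrency ID: a compact index within this mm, 0 <= mm_cid < nr concurrent users. */
	printf("current mm_cid: %d\n", (int) rseq_current_mm_cid());
	return 0;
}

A typical use of the value is indexing per-slot data (memory pools, counters) by mm_cid instead of by CPU number, which is what the mm_cid template variant selected via RSEQ_TEMPLATE_CPU_ID_FIELD above enables.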
/kernel/linux/linux-6.6/kernel/sched/
sched.h
    3526  struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid;  in mm_cid_put_lazy()
    3539  struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid;  in mm_cid_pcpu_unset()
    3597  struct mm_cid *pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(rq));  in mm_cid_snapshot_time()
    3657  struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid;  in mm_cid_get()
    3716  prev->mm_cid = -1;  in switch_mm_cid()
    3719  next->last_mm_cid = next->mm_cid = mm_cid_get(rq, next->mm);  in switch_mm_cid()
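mm_cid_get() and switch_mm_cid() above hand out and recycle a compact per-mm ID on context switch. The real code is lock-free, caches the cid per CPU and reclaims IDs lazily; as a rough mental model only, the allocation idea is "smallest unused ID from a per-mm bitmap". A toy userspace sketch (all names made up for illustration):

#include <stdio.h>
#include <stdbool.h>

#define MAX_CIDS 64
static bool cid_in_use[MAX_CIDS];

/* Hand out the smallest free ID: mirrors the "compact" property of mm_cid,
 * i.e. IDs stay in [0, number of concurrent users of the mm). */
static int toy_cid_get(void)
{
	for (int cid = 0; cid < MAX_CIDS; cid++) {
		if (!cid_in_use[cid]) {
			cid_in_use[cid] = true;
			return cid;
		}
	}
	return -1;	/* pool exhausted */
}

static void toy_cid_put(int cid)
{
	if (cid >= 0)
		cid_in_use[cid] = false;
}

int main(void)
{
	int a = toy_cid_get();	/* 0 */
	int b = toy_cid_get();	/* 1 */

	toy_cid_put(a);		/* 0 becomes reusable */
	printf("%d %d %d\n", a, b, toy_cid_get());	/* prints: 0 1 0 */
	return 0;
}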
core.c
    12366  * mm_cid remote-clear implements a lock-free algorithm to clear per-mm/cpu cid
    12461  struct mm_cid *src_pcpu_cid)  in __sched_mm_cid_migrate_from_fetch_cid()
    12502  struct mm_cid *src_pcpu_cid,  in __sched_mm_cid_migrate_from_try_steal_cid()
    12566  struct mm_cid *src_pcpu_cid, *dst_pcpu_cid;  in sched_mm_cid_migrate_to()
    12617  static void sched_mm_cid_remote_clear(struct mm_struct *mm, struct mm_cid *pcpu_cid,  in sched_mm_cid_remote_clear()
    12679  struct mm_cid *pcpu_cid;  in sched_mm_cid_remote_clear_old()
    12712  struct mm_cid *pcpu_cid;  in sched_mm_cid_remote_clear_weight()
    12813  t->last_mm_cid = t->mm_cid = -1;  in sched_mm_cid_exit_signals()
    12837  t->last_mm_cid = t->mm_cid = -1;  in sched_mm_cid_before_execve()
    12860  t->last_mm_cid = t->mm_cid  in sched_mm_cid_after_execve()
    [all...]
Completed in 38 milliseconds