/kernel/linux/linux-5.10/tools/perf/
  builtin-timechart.c
      83  * this is because we want to track different programs different, while
     410  struct wake_event *we = zalloc(sizeof(*we));   in sched_wakeup()  local
     412  if (!we)                                       in sched_wakeup()
     415  we->time = timestamp;                          in sched_wakeup()
     416  we->waker = waker;                             in sched_wakeup()
     417  we->backtrace = backtrace;                     in sched_wakeup()
     420  we->waker = -1;                                in sched_wakeup()
     422  we->wakee = wakee;                             in sched_wakeup()
     423  we ...                                         in sched_wakeup()
    1037  struct wake_event *we;                         draw_wakeups()  local
    [all ...]
/kernel/linux/linux-6.6/tools/perf/
  builtin-timechart.c
      86  * this is because we want to track different programs different, while
     429  struct wake_event *we = zalloc(sizeof(*we));   in sched_wakeup()  local
     431  if (!we)                                       in sched_wakeup()
     434  we->time = timestamp;                          in sched_wakeup()
     435  we->waker = waker;                             in sched_wakeup()
     436  we->backtrace = backtrace;                     in sched_wakeup()
     439  we->waker = -1;                                in sched_wakeup()
     441  we->wakee = wakee;                             in sched_wakeup()
     442  we ...                                         in sched_wakeup()
    1046  struct wake_event *we;                         draw_wakeups()  local
    [all ...]
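Both hit sets show the same recording pattern in sched_wakeup(): zero-allocate a wake_event, stamp it with timestamp, waker, wakee and backtrace, and fall back to a waker of -1 when the waking task is unknown. Below is a minimal C sketch of that pattern; the field types, the waker_known flag, and the global list are stand-ins for perf's real code, and the list linkage is an assumption (the last sched_wakeup() hit is truncated).

    #include <stdlib.h>

    /* Stand-in for perf's wake_event; field names follow the hits above. */
    struct wake_event {
        struct wake_event  *next;
        unsigned long long  time;
        int                 waker;      /* -1 when the waker is unknown */
        int                 wakee;
        void               *backtrace;
    };

    static struct wake_event *wake_events;    /* assumed global list */

    static int sched_wakeup(unsigned long long timestamp, int waker,
                            int wakee, void *backtrace, int waker_known)
    {
        struct wake_event *we = calloc(1, sizeof(*we));  /* zalloc() analogue */

        if (!we)
            return -1;

        we->time = timestamp;
        we->waker = waker_known ? waker : -1;
        we->backtrace = backtrace;
        we->wakee = wakee;

        we->next = wake_events;    /* assumed: prepend to the event list */
        wake_events = we;
        return 0;
    }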
/kernel/linux/linux-5.10/arch/arm/lib/
  div64.S
      54  @ See if we need to handle upper 32-bit result.
      93  @ See if we need to handle lower 32-bit result.
     101  @ Here we shift remainer bits leftwards rather than moving the
     116  @ Otherwise, if lower part is also null then we are done.
     125  clz xh, xl   @ we know xh is zero here so...
     141  @ If no bit position left then we are done.
/kernel/linux/linux-6.6/arch/arm/lib/
  div64.S (identical hits to the linux-5.10 copy above)
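The div64.S comments outline textbook binary long division: check whether the upper 32-bit half of the result needs handling at all, normalize the divisor upwards (the clz at line 125 finds how far), then produce one quotient bit per compare-and-subtract step. A portable C sketch of the same scheme, close in spirit to the generic fallback the kernel keeps in lib/math/div64.c rather than to the ARM register choreography:

    #include <stdint.h>

    /* 64-by-32 division by shift-and-subtract; returns the remainder
     * and leaves the quotient in *n. */
    static uint32_t div64_32(uint64_t *n, uint32_t base)
    {
        uint64_t rem = *n, b = base, res = 0, d = 1;
        uint32_t high = rem >> 32;

        /* "See if we need to handle upper 32-bit result." */
        if (high >= base) {
            high /= base;
            res = (uint64_t)high << 32;
            rem -= (uint64_t)(high * base) << 32;
        }

        /* Normalize the divisor upwards; the assembly uses clz here. */
        while ((int64_t)b > 0 && b < rem) {
            b <<= 1;
            d <<= 1;
        }

        /* One compare/subtract per quotient bit. */
        do {
            if (rem >= b) {
                rem -= b;
                res += d;
            }
            b >>= 1;
            d >>= 1;
        } while (d);

        *n = res;
        return (uint32_t)rem;
    }

The clz the comments lean on skips bit positions that cannot contribute a quotient bit, which the normalization loop above approximates one shift at a time.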
/kernel/linux/linux-5.10/arch/x86/kernel/acpi/
  wakeup_32.S
      22  # reload the gdt, as we need the full 32 bit address
      38  # jump to place where we left off
      82  # In case of S3 failure, we'll emerge here. Jump ...
/kernel/linux/linux-6.6/arch/x86/kernel/acpi/
  wakeup_32.S (identical hits to the linux-5.10 copy above)
/kernel/linux/linux-5.10/arch/c6x/lib/
  divu.S
      27  ;; We use a series of up to 31 subc instructions. First, we find
      30  ;; to the, and the number of times we have to execute subc.
      32  ;; At the end, we have both the remainder and most of the quotient
      44  ;; The loop performs a maximum of 28 steps, so we do the
  divremu.S
      10  ;; We use a series of up to 31 subc instructions. First, we find
      13  ;; to the, and the number of times we have to execute subc.
      15  ;; At the end, we have both the remainder and most of the quotient
      32  ;; The loop performs a maximum of 28 steps, so we do the
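subc is the C6x conditional-subtract step: if the accumulator is at least the divisor, subtract it and shift in a 1 quotient bit, otherwise just shift in a 0. The comments describe aligning the divisor with the dividend's top bit and then running one such step per quotient bit, at most 31. A C model of the idea; it widens the accumulator to 64 bits for clarity, whereas the real routine keeps remainder and quotient packed in a single 32-bit register, which is why its comment says it ends up with only "most of the quotient" in place:

    #include <stdint.h>

    /* One SUBC step: conditional subtract, then shift a quotient bit in. */
    static uint64_t subc_step(uint64_t acc, uint64_t d)
    {
        return acc >= d ? ((acc - d) << 1) | 1 : acc << 1;
    }

    /* Restoring division from repeated subc steps.  Assumes d != 0;
     * __builtin_clz is the GCC/Clang builtin. */
    static uint32_t divu(uint32_t n, uint32_t d, uint32_t *rem)
    {
        uint64_t acc = n, dd;
        int steps, i;

        if (n < d) {                  /* quotient 0, remainder n */
            *rem = n;
            return 0;
        }

        /* Align the divisor's top bit with the dividend's. */
        steps = __builtin_clz(d) - __builtin_clz(n);
        dd = (uint64_t)d << steps;

        for (i = 0; i <= steps; i++)
            acc = subc_step(acc, dd);

        *rem = (uint32_t)(acc >> (steps + 1));                /* high bits */
        return (uint32_t)(acc & ((1ULL << (steps + 1)) - 1)); /* low bits  */
    }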
/kernel/linux/linux-5.10/arch/arm/vfp/
  vfphw.S
      98  @ On UP, we lazily save the VFP context. As a different
     103  @ exceptions, so we can get at the
     124  @ For SMP, if this thread does not own the hw context, then we
     126  @ we always save the state when we switch away from a thread.
     144  @ exceptions, so we can get at the
     179  ret r9   @ we think we have handled things
     215  b VFP_bounce   @ we have handled this - the support ...
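The vfphw.S comments describe the lazy policy for the VFP register file on UP: hardware state is saved only when another thread actually needs the unit, so the bounce path must first check which thread the hardware currently belongs to. On SMP the state is saved eagerly at every switch away instead, because another CPU has no way to reach this CPU's registers later. A sketch of the UP policy, with every name made up for illustration:

    /* Illustrative types; the kernel's live in arch/arm/vfp. */
    struct vfp_state {
        unsigned long long dregs[32];
        unsigned int       fpscr, fpexc;
    };

    struct thread {
        struct vfp_state vfp;
    };

    static struct thread *vfp_owner;   /* whose state is live in hardware */

    static void vfp_hw_save(struct vfp_state *st) { (void)st; /* FSTMIA... */ }
    static void vfp_hw_load(struct vfp_state *st) { (void)st; /* FLDMIA... */ }

    /* Bounce path on UP: a different thread may have run since our
     * state was last loaded, so the previous owner is saved only now. */
    static void vfp_reload_lazy(struct thread *curr)
    {
        if (vfp_owner == curr)
            return;                          /* hardware already ours */

        if (vfp_owner)
            vfp_hw_save(&vfp_owner->vfp);    /* the deferred, lazy save */

        vfp_hw_load(&curr->vfp);
        vfp_owner = curr;
    }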
/kernel/linux/linux-5.10/arch/arm/kernel/
  phys2virt.S
      78  @ instructions, where we need to patch in the offset into the
      87  @ In the LPAE case, we also need to patch in the high word of the
      89  @ to a MVN instruction if the offset is negative. In this case, we
      93  @ of i:imm3 != 0b0000, but fortunately, we never need more than 8 lower
     131  @ in BE8, we load data in BE, but instructions still in LE
     156  @ instructions, where we need to patch in the offset into the
     169  @ In the LPAE case, we use a MOVW instruction to carry the low offset
/kernel/linux/linux-6.6/arch/arm/kernel/
  phys2virt.S (identical hits to the linux-5.10 copy above)
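phys2virt.S patches the physical-to-virtual offset directly into the immediate fields of previously recorded instructions at boot, switching to the MVN form when the offset is negative and handling the extra high word in the LPAE case. A conceptual C sketch of that fixup walk; the patch-site table and the 16-bit immediate splice below are deliberately simplified stand-ins, not the real ARM MOVW/MVN encodings, and the BE8 byte ordering the comments mention is omitted:

    #include <stdint.h>

    #define NR_PV_STUBS 4                  /* stand-in for the linker table */
    static uint32_t  insn_pool[NR_PV_STUBS];
    static uint32_t *pv_table[NR_PV_STUBS] = {
        &insn_pool[0], &insn_pool[1], &insn_pool[2], &insn_pool[3],
    };

    static void patch_phys_to_virt(uint32_t offset)
    {
        for (int i = 0; i < NR_PV_STUBS; i++) {
            uint32_t insn = *pv_table[i];

            /* Drop the placeholder immediate and splice in the new
             * one; a negative offset would instead flip the opcode to
             * its MVN form, as the comments describe. */
            *pv_table[i] = (insn & ~0xffffu) | (offset & 0xffffu);
        }
    }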
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gvt/
  gtt.c
     167  * table type, as we know l4 root entry doesn't have a PSE bit,
     442  * it also works, so we need to treat root pointer entry        in gen8_gtt_test_present()
    1079  struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)      in ppgtt_populate_spt_by_guest_entry()
    1086  GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(we->type)));      in ppgtt_populate_spt_by_guest_entry()
    1088  if (we->type == GTT_TYPE_PPGTT_PDE_ENTRY)                     in ppgtt_populate_spt_by_guest_entry()
    1089  ips = vgpu_ips_enabled(vgpu) && ops->test_ips(we);            in ppgtt_populate_spt_by_guest_entry()
    1091  spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we));     in ppgtt_populate_spt_by_guest_entry()
    1107  int type = get_next_pt_type(we->type);                        in ppgtt_populate_spt_by_guest_entry()
    1114  spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips); in ppgtt_populate_spt_by_guest_entry()
    1138  spt, we ...                                                   in ppgtt_populate_spt_by_guest_entry()
    1078  ppgtt_populate_spt_by_guest_entry(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)   argument
    1410  ppgtt_handle_guest_entry_add(struct intel_vgpu_ppgtt_spt *spt, struct intel_gvt_gtt_entry *we, unsigned long index)   argument
    1615  ppgtt_handle_guest_write_page_table(struct intel_vgpu_ppgtt_spt *spt, struct intel_gvt_gtt_entry *we, unsigned long index)   argument
    1740  struct intel_gvt_gtt_entry we, se;   in ppgtt_handle_guest_write_page_table_bytes()  local
    [all ...]
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/gvt/
  gtt.c
     169  * table type, as we know l4 root entry doesn't have a PSE bit,
     439  * it also works, so we need to treat root pointer entry        in gen8_gtt_test_present()
    1070  struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)      in ppgtt_populate_spt_by_guest_entry()
    1077  GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(we->type)));      in ppgtt_populate_spt_by_guest_entry()
    1079  if (we->type == GTT_TYPE_PPGTT_PDE_ENTRY)                     in ppgtt_populate_spt_by_guest_entry()
    1080  ips = vgpu_ips_enabled(vgpu) && ops->test_ips(we);            in ppgtt_populate_spt_by_guest_entry()
    1082  spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we));     in ppgtt_populate_spt_by_guest_entry()
    1098  int type = get_next_pt_type(we->type);                        in ppgtt_populate_spt_by_guest_entry()
    1105  spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips); in ppgtt_populate_spt_by_guest_entry()
    1129  spt, we ...                                                   in ppgtt_populate_spt_by_guest_entry()
    1069  ppgtt_populate_spt_by_guest_entry(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)   argument
    1365  ppgtt_handle_guest_entry_add(struct intel_vgpu_ppgtt_spt *spt, struct intel_gvt_gtt_entry *we, unsigned long index)   argument
    1570  ppgtt_handle_guest_write_page_table(struct intel_vgpu_ppgtt_spt *spt, struct intel_gvt_gtt_entry *we, unsigned long index)   argument
    1695  struct intel_gvt_gtt_entry we, se;   in ppgtt_handle_guest_write_page_table_bytes()  local
    [all ...]
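The ppgtt_populate_spt_by_guest_entry() hits trace GVT-g's shadow-page-table flow: given a guest entry, reuse the shadow table already built for that guest frame, otherwise allocate one of the next page-table type down and populate it. A toy C rendition with stand-in types and a toy lookup table; the real code additionally handles 64K (IPS) entries, reference counting, and error unwinding:

    #include <stdlib.h>

    struct guest_entry { int type; unsigned long pfn; };
    struct shadow_pt  { int type; unsigned long guest_gfn; };

    #define MAX_SPT 64
    static struct shadow_pt *spt_cache[MAX_SPT];   /* toy gfn -> spt map */

    static struct shadow_pt *find_spt_by_gfn(unsigned long gfn)
    {
        return gfn < MAX_SPT ? spt_cache[gfn] : NULL;
    }

    static struct shadow_pt *alloc_spt(int type, unsigned long gfn)
    {
        struct shadow_pt *spt = calloc(1, sizeof(*spt));

        if (spt && gfn < MAX_SPT) {
            spt->type = type;
            spt->guest_gfn = gfn;
            spt_cache[gfn] = spt;
        }
        return spt;
    }

    static struct shadow_pt *populate_spt_by_guest_entry(struct guest_entry *we)
    {
        struct shadow_pt *spt = find_spt_by_gfn(we->pfn);

        if (spt)
            return spt;                 /* already shadowed */

        spt = alloc_spt(we->type - 1 /* next table type down */, we->pfn);
        /* the real code now walks the guest table and shadows each entry */
        return spt;
    }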
/kernel/linux/linux-5.10/arch/arc/kernel/
  entry-compact.S
      30  * out. Since we don't do FAKE RTIE for Priv-V, CPU exception state remains
      40  * we need to explicitly do this. The problem in macros
      80  * across sections (.vector to .text) we are guaranteed that 'j somewhere'
     328  # reenabled after we return from interrupt/exception.
     336  ; Note that we use realtime STATUS32 (not pt_regs->status32) to
     358  ; if L2 IRQ interrupted an L1 ISR, we'd disabled preemption earlier
     360  ; undeterministically. Now that we've achieved that, let's reset
     372  ; must not be 0 because we would have incremented it.
     373  ; If this does happen we simply HALT as it means a BUG !!!
/kernel/linux/linux-6.6/arch/arc/kernel/
  entry-compact.S
      30  * out. Since we don't do FAKE RTIE for Priv-V, CPU exception state remains
      40  * we need to explicitly do this. The problem in macros
      80  * across sections (.vector to .text) we are guaranteed that 'j somewhere'
     313  # reenabled after we return from interrupt/exception.
     321  ; Note that we use realtime STATUS32 (not pt_regs->status32) to
     343  ; if L2 IRQ interrupted an L1 ISR, we'd disabled preemption earlier
     345  ; undeterministically. Now that we've achieved that, let's reset
     357  ; must not be 0 because we would have incremented it.
     358  ; If this does happen we simply HALT as it means a BUG !!!
/kernel/linux/linux-5.10/arch/alpha/lib/
  ev6-clear_user.S
       6  * Zero user space, handling exceptions as we go.
      28  * may come along during the execution of this chunk of code, and we don't
      29  * want to leave a hole (and we also want to avoid repeating lots of work)
      33  /* Allow an exception for an insn; exit if we get one. */
      58  # Note - we never actually use $2, so this is a moot computation
      59  # and we can rewrite this later...
      86  subq $1, 16, $4   # .. .. .. E : If < 16, we can not use the huge loop
      92  * We know that we're going to do at least 16 quads, which means we are
      94  * Figure out how many quads we nee ...
    [all ...]
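ev6-clear_user.S zeroes user memory in blocks: a byte head until the pointer is quadword-aligned, a "huge loop" entered only when at least 16 quadwords remain, then whole-quad and byte tails. Every access in the original is wrapped by EX() so a fault exits cleanly; that part has no portable analog and is omitted from this userspace C sketch of the blocking:

    #include <stddef.h>
    #include <stdint.h>

    static void clear_block(void *to, size_t n)
    {
        char *p = to;

        while (n && ((uintptr_t)p & 7)) {    /* head: align to a quadword */
            *p++ = 0;
            n--;
        }

        while (n >= 16 * 8) {                /* "huge loop": 16 quads/pass */
            uint64_t *q = (uint64_t *)p;

            for (int i = 0; i < 16; i++)
                q[i] = 0;
            p += 16 * 8;
            n -= 16 * 8;
        }

        while (n >= 8) {                     /* remaining whole quads */
            *(uint64_t *)p = 0;
            p += 8;
            n -= 8;
        }

        while (n--)                          /* byte tail */
            *p++ = 0;
    }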
  ev6-memcpy.S
      57  and $16, 7, $1       # E : Are we at 0mod8 yet?
      62  cmple $18, 127, $1   # E : Can we unroll the loop?
      80  cmple $18, 127, $1   # E : Can we go through the unrolled loop?
     196  bne $1, $aligndest   # U : go until we are aligned.
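ev6-memcpy.S makes exactly the dispatch these hits show: byte-copy until the destination is 0 mod 8, and take the unrolled path only when more than 127 bytes remain. A C sketch of that dispatch; unlike the real routine, it also requires the source to be aligned before unrolling, since unaligned 64-bit loads are not portable C (the assembly handles a misaligned source with its own path):

    #include <stddef.h>
    #include <stdint.h>

    static void *memcpy_sketch(void *dst, const void *src, size_t n)
    {
        char *d = dst;
        const char *s = src;

        while (n && ((uintptr_t)d & 7)) {    /* "go until we are aligned" */
            *d++ = *s++;
            n--;
        }

        /* "Can we unroll the loop?": only for more than 127 bytes. */
        if (n > 127 && !((uintptr_t)s & 7)) {
            while (n >= 64) {                /* 8 quads per iteration */
                for (int i = 0; i < 8; i++)
                    ((uint64_t *)d)[i] = ((const uint64_t *)s)[i];
                d += 64;
                s += 64;
                n -= 64;
            }
        }

        while (n--)                          /* simple tail */
            *d++ = *s++;

        return dst;
    }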
  memchr.S
      45  # search til the end of the address space, we will overflow
      46  # below when we find the address of the last byte. Given
      47  # that we will never have a 56-bit address space, cropping
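The memchr.S comment records a subtle overflow: computing the address of the last byte as base + len - 1 can wrap when len reaches toward the end of the address space, so the length is first cropped to a bound no real Alpha address space reaches (2^56). The same guard in C, assuming a 64-bit size_t as on Alpha:

    #include <stddef.h>
    #include <stdint.h>

    static const void *memchr_sketch(const void *s, int c, size_t n)
    {
        const unsigned char *p = s;
        const unsigned char *last;

        if (!n)
            return NULL;

        if (n > ((size_t)1 << 56))    /* no 56-bit address space exists */
            n = (size_t)1 << 56;

        last = p + n - 1;             /* now safe from wraparound */

        for (; p <= last; p++)
            if (*p == (unsigned char)c)
                return p;
        return NULL;
    }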
/kernel/linux/linux-5.10/arch/m68k/math-emu/
  fp_entry.S
      80  | we jump here after an access error while trying to access
      81  | user space, we correct stackpointer and send a SIGSEGV to
      95  | send a trace signal if we are debugged
     110  | directly, others are on the stack, as we read/write the stack
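fp_entry.S's error path is the classic fixup pattern: when an access to user space faults mid-emulation, correct the stack pointer to undo the partial work, then deliver SIGSEGV; a trace signal is sent instead of a plain return when the task is being debugged. A C outline with stand-in helpers; the kernel itself delivers signals with force_sig(), and raise() merely stands in here:

    #include <signal.h>

    struct emu_frame {
        void *saved_sp;
        int   traced;
    };

    static void unwind_to(void *sp)
    {
        (void)sp;    /* stand-in for the stack-pointer fixup in asm */
    }

    static void fp_access_error(struct emu_frame *f)
    {
        unwind_to(f->saved_sp);    /* "we correct stackpointer" */
        raise(SIGSEGV);            /* "... and send a SIGSEGV"  */
    }

    static void fp_emu_done(struct emu_frame *f)
    {
        if (f->traced)             /* "send a trace signal if we */
            raise(SIGTRAP);        /*  are debugged"             */
    }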
/kernel/linux/linux-6.6/arch/alpha/lib/
  ev6-clear_user.S (identical hits to the linux-5.10 copy above)
  ev6-memcpy.S (identical hits to the linux-5.10 copy above)
  memchr.S (identical hits to the linux-5.10 copy above)
/kernel/linux/linux-6.6/arch/m68k/math-emu/
  fp_entry.S (identical hits to the linux-5.10 copy above)
/kernel/linux/linux-5.10/arch/alpha/kernel/
  head.S
      30  /* ... and then we can start the kernel. */
      62  # masking, and we cannot duplicate the effort without causing problems
      89  # Putting it here means we dont have to recompile the whole
/kernel/linux/linux-6.6/arch/alpha/kernel/
  head.S (identical hits to the linux-5.10 copy above)