/kernel/linux/linux-5.10/arch/arm64/kernel/

fpsimd.c
   118  void *sve_state;   (member)
   218  kfree(task->thread.sve_state);   in __sve_free()
   219  task->thread.sve_state = NULL;   in __sve_free()
   246  * task->thread.sve_state, formatted appropriately for vector
   249  * task->thread.sve_state must point to a valid buffer at least
   268  * task->thread.sve_state does not need to be non-NULL, valid or any
   321  sve_save_state((char *)last->sve_state +   in fpsimd_save()
   403  #define ZREG(sve_state, vq, n) ((char *)(sve_state) + \
   437  * task->thread.sve_state
  1126  fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state, unsigned int sve_vl)   (argument)
  1341  char *sve_state = this_cpu_ptr(efi_sve_state);   in __efi_fpsimd_begin() (local)
  1368  char const *sve_state = this_cpu_ptr(efi_sve_state);   in __efi_fpsimd_end() (local)
  [all...]

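The ZREG() macro at fpsimd.c:403 indexes one Z register inside the packed
sve_state buffer. A minimal standalone sketch of that offset arithmetic,
assuming the signal-frame packing (Z0..Z31 back to back, each vq * 16 bytes
for a vector length of vq quadwords); zreg_off() and the demo are
illustrative names, not kernel symbols:

    #include <stddef.h>
    #include <stdio.h>

    /* Byte offset of Z register n when the vector length is vq
     * quadwords (vl = vq * 16 bytes). */
    static size_t zreg_off(unsigned int vq, unsigned int n)
    {
            return (size_t)n * vq * 16;
    }

    int main(void)
    {
            unsigned int vq = 2;    /* 256-bit vectors, vl = 32 bytes */

            printf("Z0 at %zu, Z1 at %zu, Z31 at %zu\n",
                   zreg_off(vq, 0), zreg_off(vq, 1), zreg_off(vq, 31));
            return 0;
    }
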
signal.c
   252  current->thread.sve_state,   in preserve_sve_context()
   284  * thread.sve_state with preemption enabled, so protection is   in restore_sve_fpsimd_context()
   290  /* From now, fpsimd_thread_switch() won't touch thread.sve_state */   in restore_sve_fpsimd_context()
   293  err = __copy_from_user(current->thread.sve_state,   in restore_sve_fpsimd_context()

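The restore_sve_fpsimd_context() matches show the two-step pattern for
loading signal-frame SVE state: invalidate any live register copy, then
copy the payload in. A hedged sketch of just that pattern
(sve_restore_from_user() is an illustrative name; the real function also
validates the header and vector length and ensures sve_state is allocated
before copying):

    #include <linux/sched.h>
    #include <linux/uaccess.h>
    #include <asm/fpsimd.h>

    static int sve_restore_from_user(const char __user *payload, size_t size)
    {
            /* Discard live registers so a context switch cannot write
             * stale contents back over thread.sve_state. */
            fpsimd_flush_task_state(current);

            /* From now, fpsimd_thread_switch() won't touch sve_state. */
            if (__copy_from_user(current->thread.sve_state, payload, size))
                    return -EFAULT;
            return 0;
    }
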
process.c
   364  * Detach src's sve_state (if any) from dst so that it does not   in arch_dup_task_struct()
   365  * get erroneously used or freed prematurely. dst's sve_state   in arch_dup_task_struct()
   369  * maintainers it is best not to leave TIF_SVE and sve_state in   in arch_dup_task_struct()
   372  dst->thread.sve_state = NULL;   in arch_dup_task_struct()

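arch_dup_task_struct() must not let the child inherit the parent's
sve_state pointer, otherwise two tasks would eventually free the same
buffer. A simplified sketch of the detach (the real 5.10 function also
preserves the current FPSIMD state and handles GCS/pointer-auth details):

    #include <linux/sched.h>
    #include <asm/thread_info.h>

    int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
    {
            *dst = *src;    /* bitwise copy of the whole task_struct */

            /* Detach: dst allocates its own buffer on first SVE use. */
            dst->thread.sve_state = NULL;
            clear_tsk_thread_flag(dst, TIF_SVE);

            return 0;
    }
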
ptrace.c
   775  membuf_write(&to, target->thread.sve_state, end - start);   in sve_get()
   852  * Ensure target->thread.sve_state is up to date with target's   in sve_set()
   863  target->thread.sve_state,   in sve_set()

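sve_get() streams the kernel-side buffer straight into the regset membuf
backing the ptrace read. A hedged sketch of that read path
(sve_dump_sketch() is illustrative; `len` stands in for the `end - start`
payload size the real code derives from the vector length):

    #include <linux/regset.h>
    #include <linux/sched.h>

    static int sve_dump_sketch(struct task_struct *target,
                               struct membuf to, size_t len)
    {
            membuf_write(&to, target->thread.sve_state, len);
            return membuf_zero(&to, to.left);   /* zero-fill the rest */
    }
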
/kernel/linux/linux-6.6/arch/arm64/kvm/

reset.c
    95  * vcpu->arch.sve_state as necessary.
   126  vcpu->arch.sve_state = buf;   in kvm_vcpu_finalize_sve()
   157  void *sve_state = vcpu->arch.sve_state;   in kvm_arm_vcpu_destroy() (local)
   161  if (sve_state)   in kvm_arm_vcpu_destroy()
   162  kvm_unshare_hyp(sve_state, sve_state + vcpu_sve_state_size(vcpu));   in kvm_arm_vcpu_destroy()
   163  kfree(sve_state);   in kvm_arm_vcpu_destroy()
   170  memset(vcpu->arch.sve_state, 0, vcpu_sve_state_size(vcpu));   in kvm_vcpu_reset_sve()

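These reset.c matches outline the full lifetime of a vcpu's sve_state
under KVM: allocated and shared with EL2 when the vcpu is finalized,
unshared and freed on destroy. A hedged sketch of that lifecycle
(function names are illustrative and error handling is trimmed relative
to the real code):

    #include <linux/kvm_host.h>
    #include <linux/slab.h>
    #include <asm/kvm_mmu.h>

    static int sve_finalize_sketch(struct kvm_vcpu *vcpu)
    {
            size_t sz = vcpu_sve_state_size(vcpu);
            void *buf = kzalloc(sz, GFP_KERNEL_ACCOUNT);
            int ret;

            if (!buf)
                    return -ENOMEM;

            ret = kvm_share_hyp(buf, buf + sz); /* make visible at EL2 */
            if (ret) {
                    kfree(buf);
                    return ret;
            }

            vcpu->arch.sve_state = buf;
            return 0;
    }

    static void sve_destroy_sketch(struct kvm_vcpu *vcpu)
    {
            void *sve_state = vcpu->arch.sve_state;

            if (sve_state)    /* unshare before the host frees it */
                    kvm_unshare_hyp(sve_state,
                                    sve_state + vcpu_sve_state_size(vcpu));
            kfree(sve_state);
    }
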
fpsimd.c
   152  fp_state.sve_state = vcpu->arch.sve_state;   in kvm_arch_vcpu_ctxsync_fp()

guest.c
   347  if (WARN_ON(vcpu->arch.sve_state))   in set_sve_vls()
   376  /* vcpu->arch.sve_state will be alloc'd by kvm_vcpu_finalize_sve() */   in set_sve_vls()
   406  /* Bounds of a single SVE register slice within vcpu->arch.sve_state */
   408  unsigned int koffset;   /* offset into sve_state in kernel memory */
   502  if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset,   in get_sve_reg()
   528  if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr,   in set_sve_reg()

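Once guest.c has resolved a register ID to slice bounds (the koffset
member at line 406-408 plus its companion length), the KVM_GET_ONE_REG
accessor reduces to a plain user copy out of sve_state. A hedged sketch
of the read side (illustrative wrapper; the real get_sve_reg() first maps
the register ID to a region and rejects unfinalized vcpus):

    #include <linux/kvm_host.h>
    #include <linux/uaccess.h>

    static int get_sve_reg_sketch(struct kvm_vcpu *vcpu, char __user *uptr,
                                  size_t koffset, size_t klen)
    {
            /* Copy one register slice from kernel memory to userspace. */
            if (copy_to_user(uptr, (char *)vcpu->arch.sve_state + koffset,
                             klen))
                    return -EFAULT;
            return 0;
    }
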
/kernel/linux/linux-6.6/arch/arm64/kernel/

fpsimd.c
   302  * allocated in sve_state and sme_state to store the contents of both ZA
   349  * task->thread.sve_state does not need to be non-NULL, valid or any
   358  * task->thread.sve_state, formatted appropriately for vector
   363  * task->thread.sve_state must point to a valid buffer at least
   517  sve_save_state((char *)last->sve_state +   in fpsimd_save()
   632  #define ZREG(sve_state, vq, n) ((char *)(sve_state) + \
   666  * task->thread.sve_state.
   671  * task->thread.sve_state must point to at least sve_state_size(task)
   679  void *sst = task->thread.sve_state;   in fpsimd_to_sve()
  1643  void *sve_state = NULL;   in fpsimd_flush_thread() (local)
  1997  char *sve_state = this_cpu_ptr(efi_sve_state);   in __efi_fpsimd_begin() (local)
  2046  char const *sve_state = this_cpu_ptr(efi_sve_state);   in __efi_fpsimd_end() (local)
  [all...]

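fpsimd_to_sve() (the line 679 match) spreads the 32 128-bit FPSIMD V
registers into the low bits of the corresponding Z registers inside
sve_state. A hedged sketch of that conversion (the real code also
byte-swaps on big-endian via arm64_cpu_to_le128()):

    #include <linux/string.h>
    #include <asm/fpsimd.h>

    static void fpsimd_to_sve_sketch(void *sst,
                                     const struct user_fpsimd_state *fst,
                                     unsigned int vq)
    {
            unsigned int i;

            /* Vn becomes the low 128 bits of Zn at its packed offset. */
            for (i = 0; i < 32; i++)
                    memcpy((char *)sst + (size_t)i * vq * 16,
                           &fst->vregs[i], 16);
    }
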
process.c
   297  * Detach src's sve_state (if any) from dst so that it does not   in arch_dup_task_struct()
   305  dst->thread.sve_state = NULL;   in arch_dup_task_struct()
   314  * sve_state allocated whenever sme_state is allocated.   in arch_dup_task_struct()
   317  dst->thread.sve_state = kzalloc(sve_state_size(src),   in arch_dup_task_struct()
   319  if (!dst->thread.sve_state)   in arch_dup_task_struct()
   326  kfree(dst->thread.sve_state);   in arch_dup_task_struct()
   327  dst->thread.sve_state = NULL;   in arch_dup_task_struct()

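In 6.6 the fork path grew an allocation branch: with SME, streaming-mode
vector state also lives in sve_state, so a child of a task with ZA live
gets a fresh zeroed buffer rather than a bare NULL pointer. A hedged
sketch of that branch (condition and cleanup simplified from the real
arch_dup_task_struct(), which also duplicates sme_state and frees
sve_state again if that second allocation fails):

    #include <linux/sched.h>
    #include <linux/slab.h>
    #include <asm/fpsimd.h>
    #include <asm/processor.h>

    static int dup_sve_state_sketch(struct task_struct *dst,
                                    struct task_struct *src)
    {
            dst->thread.sve_state = NULL;

            /* ZA live in src: the child needs its own zeroed buffer. */
            if (thread_za_enabled(&src->thread)) {
                    dst->thread.sve_state = kzalloc(sve_state_size(src),
                                                    GFP_KERNEL);
                    if (!dst->thread.sve_state)
                            return -ENOMEM;
            }
            return 0;
    }
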
ptrace.c
   809  membuf_write(&to, target->thread.sve_state, end - start);   in sve_get_common()
   933  if (!target->thread.sve_state) {   in sve_set_common()
   941  * Ensure target->thread.sve_state is up to date with target's   in sve_set_common()
   956  target->thread.sve_state,   in sve_set_common()
  1102  if (!target->thread.sve_state) {   in za_set()
  1104  if (!target->thread.sve_state) {   in za_set()
  1184  if (!target->thread.sve_state)   in zt_set()

signal.c
   266  current->thread.sve_state,   in preserve_sve_context()
   322  * thread.sve_state with preemption enabled, so protection is   in restore_sve_fpsimd_context()
   328  /* From now, fpsimd_thread_switch() won't touch thread.sve_state */   in restore_sve_fpsimd_context()
   331  if (!current->thread.sve_state) {   in restore_sve_fpsimd_context()
   336  err = __copy_from_user(current->thread.sve_state,   in restore_sve_fpsimd_context()
   476  /* From now, fpsimd_thread_switch() won't touch thread.sve_state */   in restore_za_context()

/kernel/linux/linux-5.10/arch/arm64/include/asm/

fpsimd.h
    50  void *sve_state, unsigned int sve_vl);
    66  return (char *)thread->sve_state + sve_ffr_offset(thread->sve_vl);   in sve_pffr()

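sve_pffr() returns a pointer to the first-fault register (FFR), which
sits after all Z and P registers in the packed buffer, so
sve_ffr_offset() is pure arithmetic on the vector length. A standalone
sketch of that arithmetic (ffr_offset_sketch() is an illustrative name,
not the kernel helper):

    #include <stddef.h>

    /* vl is the vector length in bytes; a quadword is 16 bytes. */
    static size_t ffr_offset_sketch(unsigned int vl)
    {
            unsigned int vq = vl / 16;
            size_t zregs = 32 * (size_t)vq * 16; /* Z0..Z31, vl bytes each */
            size_t pregs = 16 * (size_t)vq * 2;  /* P0..P15, one predicate
                                                    bit per vector byte */
            return zregs + pregs;                /* FFR follows directly */
    }
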
kvm_host.h
   286  void *sve_state;   (member)
   387  #define vcpu_sve_pffr(vcpu) ((void *)((char *)((vcpu)->arch.sve_state) + \

processor.h
   145  void *sve_state;   /* SVE registers, if any */   (member)

/kernel/linux/linux-5.10/arch/arm64/kvm/

reset.c
   148  * vcpu->arch.sve_state as necessary.
   170  vcpu->arch.sve_state = buf;   in kvm_vcpu_finalize_sve()
   201  kfree(vcpu->arch.sve_state);   in kvm_arm_vcpu_destroy()
   207  memset(vcpu->arch.sve_state, 0, vcpu_sve_state_size(vcpu));   in kvm_vcpu_reset_sve()

fpsimd.c
    89  vcpu->arch.sve_state,   in kvm_arch_vcpu_ctxsync_fp()

guest.c
   303  if (WARN_ON(vcpu->arch.sve_state))   in set_sve_vls()
   332  /* vcpu->arch.sve_state will be alloc'd by kvm_vcpu_finalize_sve() */   in set_sve_vls()
   362  /* Bounds of a single SVE register slice within vcpu->arch.sve_state */
   364  unsigned int koffset;   /* offset into sve_state in kernel memory */
   458  if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset,   in get_sve_reg()
   484  if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr,   in set_sve_reg()

/kernel/linux/linux-6.6/arch/arm64/include/asm/

fpsimd.h
    63  void *sve_state;   (member)
   105  return (char *)thread->sve_state + sve_ffr_offset(vl);   in sve_pffr()

processor.h
   163  void *sve_state;   /* SVE registers, if any */   (member)

kvm_host.h
   480  void *sve_state;   (member)
   743  #define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) + \

/kernel/linux/linux-6.6/arch/arm64/kvm/hyp/nvhe/

hyp-main.c
    32  hyp_vcpu->vcpu.arch.sve_state = kern_hyp_va(host_vcpu->arch.sve_state);   in flush_hyp_vcpu()

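Under nVHE, host kernel virtual addresses are not directly usable at EL2,
so flush_hyp_vcpu() rewrites the pointer through kern_hyp_va() before the
hypervisor-side vcpu touches the (previously shared) buffer. A hedged
sketch of just that step (the real function synchronizes much more of the
vcpu state):

    #include <asm/kvm_mmu.h>
    #include <nvhe/pkvm.h>

    static void flush_sve_state_sketch(struct pkvm_hyp_vcpu *hyp_vcpu,
                                       struct kvm_vcpu *host_vcpu)
    {
            /* Translate the host's kernel VA into an EL2-visible VA. */
            hyp_vcpu->vcpu.arch.sve_state =
                    kern_hyp_va(host_vcpu->arch.sve_state);
    }
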