/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright(c) 2016-2020 Intel Corporation. All rights reserved. */

#include <linux/linkage.h>
#include <asm/asm.h>

#ifndef CONFIG_UML

#ifdef CONFIG_X86_MCE

/*
 * copy_mc_fragile - copy memory with indication if an exception / fault happened
 *
 * The 'fragile' version is opted into by platform quirks and takes
 * pains to avoid unrecoverable corner cases like 'fast-string'
 * instruction sequences, and consuming poison across a cacheline
 * boundary. The non-fragile version is equivalent to memcpy()
 * regardless of CPU machine-check-recovery capability.
 */
SYM_FUNC_START(copy_mc_fragile)
	cmpl $8, %edx
	/* Less than 8 bytes? Go to byte copy loop */
	jb .L_no_whole_words

	/* Check for bad alignment of source */
	testl $7, %esi
	/* Already aligned */
	jz .L_8byte_aligned

	/* Copy one byte at a time until source is 8-byte aligned */
	movl %esi, %ecx
	andl $7, %ecx
	subl $8, %ecx
	negl %ecx
	subl %ecx, %edx
.L_read_leading_bytes:
	movb (%rsi), %al
.L_write_leading_bytes:
	movb %al, (%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz .L_read_leading_bytes

.L_8byte_aligned:
	movl %edx, %ecx
	andl $7, %edx
	shrl $3, %ecx
	jz .L_no_whole_words

.L_read_words:
	movq (%rsi), %r8
.L_write_words:
	movq %r8, (%rdi)
	addq $8, %rsi
	addq $8, %rdi
	decl %ecx
	jnz .L_read_words

	/* Any trailing bytes? */
.L_no_whole_words:
	andl %edx, %edx
	jz .L_done_memcpy_trap

	/* Copy trailing bytes */
	movl %edx, %ecx
.L_read_trailing_bytes:
	movb (%rsi), %al
.L_write_trailing_bytes:
	movb %al, (%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz .L_read_trailing_bytes

	/* Copy successful. Return zero */
.L_done_memcpy_trap:
	xorl %eax, %eax
.L_done:
	RET

	/*
	 * Return number of bytes not copied for any failure. Note that
	 * there is no "tail" handling since the source buffer is 8-byte
	 * aligned and poison is cacheline aligned.
	 */
.E_read_words:
	shll $3, %ecx
.E_leading_bytes:
	addl %edx, %ecx
.E_trailing_bytes:
	mov %ecx, %eax
	jmp .L_done

	/*
	 * For write fault handling, given the destination is unaligned,
	 * we handle faults on multi-byte writes with a byte-by-byte
	 * copy up to the write-protected page.
	 */
.E_write_words:
	shll $3, %ecx
	addl %edx, %ecx
	movl %ecx, %edx
	jmp copy_mc_fragile_handle_tail

	_ASM_EXTABLE_TYPE(.L_read_leading_bytes, .E_leading_bytes, EX_TYPE_DEFAULT_MCE_SAFE)
	_ASM_EXTABLE_TYPE(.L_read_words, .E_read_words, EX_TYPE_DEFAULT_MCE_SAFE)
	_ASM_EXTABLE_TYPE(.L_read_trailing_bytes, .E_trailing_bytes, EX_TYPE_DEFAULT_MCE_SAFE)
	_ASM_EXTABLE(.L_write_leading_bytes, .E_leading_bytes)
	_ASM_EXTABLE(.L_write_words, .E_write_words)
	_ASM_EXTABLE(.L_write_trailing_bytes, .E_trailing_bytes)

SYM_FUNC_END(copy_mc_fragile)
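
/*
 * Register usage for copy_mc_fragile() above: %rdi is the destination,
 * %rsi the source and %edx the byte count; %eax carries the return
 * value, 0 on success or the number of bytes not copied when a read
 * (poison) or write fault is taken.
 */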
#endif /* CONFIG_X86_MCE */

/*
 * copy_mc_enhanced_fast_string - memory copy with exception handling
 *
 * Fast string copy + fault / exception handling. If the CPU does
 * support machine check exception recovery, but does not support
 * recovering from fast-string exceptions, then this CPU needs to be
 * added to the copy_mc_fragile_key set of quirks. Otherwise, absent any
 * machine check recovery support, this version should be no slower than
 * standard memcpy.
 */
SYM_FUNC_START(copy_mc_enhanced_fast_string)
	movq %rdi, %rax
	movq %rdx, %rcx
.L_copy:
	rep movsb
	/* Copy successful. Return zero */
	xorl %eax, %eax
	RET

.E_copy:
	/*
	 * On fault %rcx is updated such that the copy instruction could
	 * optionally be restarted at the fault position, i.e. it
	 * contains 'bytes remaining'. A non-zero return indicates an
	 * error to copy_mc_generic() users, or indicates short transfers
	 * to user-copy routines.
	 */
	movq %rcx, %rax
	RET

	_ASM_EXTABLE_TYPE(.L_copy, .E_copy, EX_TYPE_DEFAULT_MCE_SAFE)

SYM_FUNC_END(copy_mc_enhanced_fast_string)
#endif /* !CONFIG_UML */
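
/*
 * Usage sketch (illustrative): callers normally do not invoke these
 * routines directly. copy_mc_to_kernel() and copy_mc_to_user() in
 * arch/x86/lib/copy_mc.c select copy_mc_fragile() or
 * copy_mc_enhanced_fast_string() based on CPU quirks and features, and
 * pass through the same "bytes not copied" return value. A caller-side
 * check might look like this (dst, src and len are placeholder names):
 *
 *	unsigned long rem = copy_mc_to_kernel(dst, src, len);
 *	if (rem)
 *		return -EFAULT;	(only 'len - rem' bytes were copied)
 */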