/third_party/node/deps/base64/base64/lib/arch/ssse3/
  enc_loop_asm.c
     7  // Generate a block of inline assembly that loads register R0 from memory. The
     9  #define LOAD(R0, ROUND) \
    10  "lddqu ("#ROUND" * 12)(%[src]), %["R0"] \n\t"
    13  // R0 using preloaded constants. Outputs in R0 and R1.
    14  #define SHUF(R0, R1) \
    15  "pshufb %[lut0], %["R0"] \n\t" \
    16  "movdqa %["R0"], %["R1"] \n\t" \
    17  "pand %[msk0], %["R0"] \n\t" \
    19  "pmulhuw %[msk1], %["R0"] \ [all...]
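The LOAD/SHUF fragments above rely on preprocessor stringization and string-literal pasting to build the asm template: ROUND is stringized with #, while R0 is passed in as a string literal naming a symbolic asm operand. A minimal standalone sketch of just that pasting (not how the library is actually built; in the real code the resulting literals sit inside an asm() statement with %[src], %[a] and friends bound as operands):

#include <stdio.h>

/* Copied from the snippet above: #ROUND stringizes the round index, and the
 * adjacent string literals concatenate into one asm template line. */
#define LOAD(R0, ROUND) \
	"lddqu ("#ROUND" * 12)(%[src]), %["R0"] \n\t"

int main(void)
{
	/* Print the templates the compiler would see for two unrolled rounds. */
	fputs(LOAD("a", 0), stdout);   /* lddqu (0 * 12)(%[src]), %[a] */
	fputs(LOAD("b", 1), stdout);   /* lddqu (1 * 12)(%[src]), %[b] */
	return 0;
}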
/third_party/ltp/tools/sparse/sparse-src/validation/
  repeat.h
     1  #define R0(P, S) P(S) macro
     2  #define R1(P, S) R0(P,S##0) R0(P,S##1)
     3  #define R2(P, S) R0(P,S##0) R0(P,S##1) R0(P,S##2) R0(P,S##3)
     4  #define R3(P, S) R0(P,S##0) R0(P,S##1) R0( [all...]
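These repeaters fan a callback macro P out over token-pasted names: R1 generates two invocations, R2 four, and so on. A small self-contained sketch of the expansion, using the R0/R1/R2 definitions shown above plus two hypothetical callbacks of my own (DECLARE_INT, PRINT_NAME) just to make the output visible:

#include <stdio.h>

#define R0(P, S) P(S)
#define R1(P, S) R0(P,S##0) R0(P,S##1)
#define R2(P, S) R0(P,S##0) R0(P,S##1) R0(P,S##2) R0(P,S##3)

/* Illustrative callbacks, not part of the test header. */
#define DECLARE_INT(name) int name;
#define PRINT_NAME(name)  puts(#name);

R2(DECLARE_INT, counter)   /* expands to: int counter0; int counter1; int counter2; int counter3; */

int main(void)
{
	R2(PRINT_NAME, counter)   /* prints counter0 .. counter3 */
	return 0;
}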
/third_party/node/deps/base64/base64/lib/arch/avx/
  enc_loop_asm.c
     7  // Generate a block of inline assembly that loads register R0 from memory. The
     9  #define LOAD(R0, ROUND) \
    10  "vlddqu ("#ROUND" * 12)(%[src]), %["R0"] \n\t"
    13  // R0 using preloaded constants. Outputs in R0 and R1.
    14  #define SHUF(R0, R1, R2) \
    15  "vpshufb %[lut0], %["R0"], %["R1"] \n\t" \
    22  // Generate a block of inline assembly that takes R0 and R1 and translates
    24  #define TRAN(R0, R1, R2) \
    25  "vpsubusb %[n51], %["R1"], %["R0"] \ [all...]
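The vpsubusb %[n51] step is the start of the usual offset-based base64 index-to-ASCII translation: a saturating subtract and a compare classify each 6-bit value into one of a handful of ranges, and a byte shuffle then picks the per-range offset to add. As a rough scalar sketch of the mapping that translation produces (illustrative code, not taken from the library):

#include <stdint.h>

/* Map a 6-bit base64 index (0..63) to its ASCII code by adding a per-range
 * offset; the SIMD version derives the same offsets branch-free. */
static uint8_t b64_translate(uint8_t idx)
{
	int offset;
	if (idx < 26)       offset = 'A';        /*  0..25 -> 'A'..'Z' */
	else if (idx < 52)  offset = 'a' - 26;   /* 26..51 -> 'a'..'z' */
	else if (idx < 62)  offset = '0' - 52;   /* 52..61 -> '0'..'9' */
	else if (idx == 62) offset = '+' - 62;
	else                offset = '/' - 63;
	return (uint8_t)(idx + offset);
}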
/third_party/node/deps/base64/base64/lib/arch/avx2/
  enc_loop_asm.c
     7  // Generate a block of inline assembly that loads register R0 from memory. The
    10  #define LOAD(R0, ROUND, OFFSET) \
    11  "vlddqu ("#ROUND" * 24 + "#OFFSET")(%[src]), %["R0"] \n\t"
    14  // R0 using preloaded constants. Outputs in R0 and R1.
    15  #define SHUF(R0, R1, R2) \
    16  "vpshufb %[lut0], %["R0"], %["R1"] \n\t" \
    23  // Generate a block of inline assembly that takes R0 and R1 and translates
    25  #define TRAN(R0, R1, R2) \
    26  "vpsubusb %[n51], %["R1"], %["R0"] \ [all...]
/third_party/rust/crates/rustix/src/backend/linux_raw/arch/outline/
  x86.rs
    17  use crate::backend::reg::{ArgReg, RetReg, SyscallNumber, A0, A1, A2, A3, A4, A5, R0};
    28  fn rustix_syscall0_nr_last_fastcall(nr: SyscallNumber<'_>) -> RetReg<R0>; in rustix_syscall0_nr_last_fastcall()
    29  fn rustix_syscall1_nr_last_fastcall(a0: ArgReg<'_, A0>, nr: SyscallNumber<'_>) -> RetReg<R0>; in rustix_syscall0_nr_last_fastcall()
    35  ) -> RetReg<R0>; in rustix_syscall2_nr_last_fastcall()
    41  ) -> RetReg<R0>; in rustix_syscall3_nr_last_fastcall()
    48  ) -> RetReg<R0>; in rustix_syscall4_nr_last_fastcall()
    56  ) -> RetReg<R0>; in rustix_syscall5_nr_last_fastcall()
    65  ) -> RetReg<R0>; in rustix_syscall6_nr_last_fastcall()
    71  pub(in crate::backend) unsafe fn syscall0(nr: SyscallNumber<'_>) -> RetReg<R0> {
    75  pub(in crate::backend) unsafe fn syscall1(nr: SyscallNumber<'_>, a0: ArgReg<'_, A0>) -> RetReg<R0> { [all...]
  nr_last.rs
    17  use crate::backend::reg::{ArgReg, RetReg, SyscallNumber, A0, A1, A2, A3, A4, A5, R0};
    27  fn rustix_syscall0_nr_last(nr: SyscallNumber<'_>) -> RetReg<R0>; in rustix_syscall0_nr_last()
    28  fn rustix_syscall1_nr_last(a0: ArgReg<'_, A0>, nr: SyscallNumber<'_>) -> RetReg<R0>; in rustix_syscall0_nr_last()
    34  ) -> RetReg<R0>; in rustix_syscall2_nr_last()
    40  ) -> RetReg<R0>; in rustix_syscall3_nr_last()
    47  ) -> RetReg<R0>; in rustix_syscall4_nr_last()
    55  ) -> RetReg<R0>; in rustix_syscall5_nr_last()
    64  ) -> RetReg<R0>; in rustix_syscall6_nr_last()
    75  ) -> RetReg<R0>; in rustix_syscall7_nr_last()
    81  pub(in crate::backend) unsafe fn syscall0(nr: SyscallNumber<'_>) -> RetReg<R0> { [all...]
/third_party/node/deps/openssl/openssl/crypto/md4/
  md4_dgst.c
    68  R0(A, B, C, D, X(0), 3, 0); in md4_block_data_order()
    71  R0(D, A, B, C, X(1), 7, 0); in md4_block_data_order()
    74  R0(C, D, A, B, X(2), 11, 0); in md4_block_data_order()
    77  R0(B, C, D, A, X(3), 19, 0); in md4_block_data_order()
    80  R0(A, B, C, D, X(4), 3, 0); in md4_block_data_order()
    83  R0(D, A, B, C, X(5), 7, 0); in md4_block_data_order()
    86  R0(C, D, A, B, X(6), 11, 0); in md4_block_data_order()
    89  R0(B, C, D, A, X(7), 19, 0); in md4_block_data_order()
    92  R0(A, B, C, D, X(8), 3, 0); in md4_block_data_order()
    95  R0( in md4_block_data_order() [all...]
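Each R0(a, b, c, d, X(k), s, 0) call above is one step of MD4's first round; the trailing 0 is the (absent) additive constant, and the shifts cycle through 3, 7, 11, 19. Written out as a plain function under the usual RFC 1320 definition rather than OpenSSL's internal macro:

#include <stdint.h>

static inline uint32_t rotl32(uint32_t x, int s)
{
	return (x << s) | (x >> (32 - s));
}

/* One MD4 round-1 step: a = (a + F(b,c,d) + X[k]) <<< s. */
static inline uint32_t md4_round0(uint32_t a, uint32_t b, uint32_t c,
                                  uint32_t d, uint32_t xk, int s)
{
	const uint32_t f = (b & c) | (~b & d);   /* F(b, c, d) */
	return rotl32(a + f + xk, s);
}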
/third_party/node/deps/openssl/openssl/crypto/md5/
  md5_dgst.c
    68  R0(A, B, C, D, X(0), 7, 0xd76aa478L); in md5_block_data_order()
    71  R0(D, A, B, C, X(1), 12, 0xe8c7b756L); in md5_block_data_order()
    74  R0(C, D, A, B, X(2), 17, 0x242070dbL); in md5_block_data_order()
    77  R0(B, C, D, A, X(3), 22, 0xc1bdceeeL); in md5_block_data_order()
    80  R0(A, B, C, D, X(4), 7, 0xf57c0fafL); in md5_block_data_order()
    83  R0(D, A, B, C, X(5), 12, 0x4787c62aL); in md5_block_data_order()
    86  R0(C, D, A, B, X(6), 17, 0xa8304613L); in md5_block_data_order()
    89  R0(B, C, D, A, X(7), 22, 0xfd469501L); in md5_block_data_order()
    92  R0(A, B, C, D, X(8), 7, 0x698098d8L); in md5_block_data_order()
    95  R0( in md5_block_data_order() [all...]
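The MD5 first-round step behind these R0 calls differs from the MD4 step above in two ways: each step has its own 32-bit constant (0xd76aa478, 0xe8c7b756, ...) and b is added back after the rotation. A sketch of the step as a function, following RFC 1321 rather than OpenSSL's macro:

#include <stdint.h>

static inline uint32_t rotl32(uint32_t x, int s)
{
	return (x << s) | (x >> (32 - s));
}

/* One MD5 round-1 step: a = b + rotl(a + F(b,c,d) + X[k] + T, s). */
static inline uint32_t md5_round0(uint32_t a, uint32_t b, uint32_t c,
                                  uint32_t d, uint32_t xk, int s, uint32_t t)
{
	const uint32_t f = (b & c) | (~b & d);   /* F(b, c, d) */
	return b + rotl32(a + f + xk + t, s);
}

/* e.g. the first call above corresponds to:
 *   A = md5_round0(A, B, C, D, X[0], 7, 0xd76aa478); */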
/third_party/openssl/crypto/md5/
  md5_dgst.c
    68  R0(A, B, C, D, X(0), 7, 0xd76aa478L); in md5_block_data_order()
    71  R0(D, A, B, C, X(1), 12, 0xe8c7b756L); in md5_block_data_order()
    74  R0(C, D, A, B, X(2), 17, 0x242070dbL); in md5_block_data_order()
    77  R0(B, C, D, A, X(3), 22, 0xc1bdceeeL); in md5_block_data_order()
    80  R0(A, B, C, D, X(4), 7, 0xf57c0fafL); in md5_block_data_order()
    83  R0(D, A, B, C, X(5), 12, 0x4787c62aL); in md5_block_data_order()
    86  R0(C, D, A, B, X(6), 17, 0xa8304613L); in md5_block_data_order()
    89  R0(B, C, D, A, X(7), 22, 0xfd469501L); in md5_block_data_order()
    92  R0(A, B, C, D, X(8), 7, 0x698098d8L); in md5_block_data_order()
    95  R0( in md5_block_data_order() [all...]
/third_party/openssl/crypto/md4/
  md4_dgst.c
    68  R0(A, B, C, D, X(0), 3, 0); in md4_block_data_order()
    71  R0(D, A, B, C, X(1), 7, 0); in md4_block_data_order()
    74  R0(C, D, A, B, X(2), 11, 0); in md4_block_data_order()
    77  R0(B, C, D, A, X(3), 19, 0); in md4_block_data_order()
    80  R0(A, B, C, D, X(4), 3, 0); in md4_block_data_order()
    83  R0(D, A, B, C, X(5), 7, 0); in md4_block_data_order()
    86  R0(C, D, A, B, X(6), 11, 0); in md4_block_data_order()
    89  R0(B, C, D, A, X(7), 19, 0); in md4_block_data_order()
    92  R0(A, B, C, D, X(8), 3, 0); in md4_block_data_order()
    95  R0( in md4_block_data_order() [all...]
/third_party/mesa3d/src/util/sha1/
  sha1.c
    38  * (R0+R1), R2, R3, R4 are the different operations (rounds) used in SHA1
    40  #define R0(v,w,x,y,z,i) z+=((w&(x^y))^y)+blk0(i)+0x5A827999+rol(v,5);w=rol(w,30); macro
    71  R0(a,b,c,d,e, 0); R0(e,a,b,c,d, 1); R0(d,e,a,b,c, 2); R0(c,d,e,a,b, 3); in SHA1Transform()
    72  R0(b,c,d,e,a, 4); R0(a,b,c,d,e, 5); R0(e,a,b,c,d, 6); R0( [all...]
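For readability, the R0 macro defined at line 40 can be written as a function; note that (w & (x ^ y)) ^ y is a branch-free form of SHA-1's round-1 choice function. This is a sketch only, with blk0(i) reduced to "the i-th big-endian message word" (the real blk0 also byte-swaps on little-endian hosts):

#include <stdint.h>

static inline uint32_t rol32(uint32_t x, int s)
{
	return (x << s) | (x >> (32 - s));
}

/* One SHA-1 round-0 step: accumulate into *z, then rotate *w by 30. */
static inline void sha1_r0(uint32_t v, uint32_t *w, uint32_t x, uint32_t y,
                           uint32_t *z, uint32_t wi)
{
	*z += ((*w & (x ^ y)) ^ y) + wi + 0x5A827999u + rol32(v, 5);
	*w = rol32(*w, 30);
}

The rotating argument order in the calls above (a,b,c,d,e then e,a,b,c,d and so on) is what lets the macro avoid copying the five state words between steps.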
/third_party/node/deps/openssl/openssl/crypto/md5/asm/
  md5-586.pl
    37  %Ltmp1=("R0",&Np($C), "R1",&Np($C), "R2",&Np($C), "R3",&Np($D));
    39  0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, # R0
    57  sub R0 subroutine
    66  &comment("R0 $ki");
    79  &mov($tmp1,&Np($c)) if $pos < 1; # next tmp1 for R0
   212  &comment("R0 section");
   214  &R0(-2,$A,$B,$C,$D,$X, 0, 7,0xd76aa478);
   215  &R0( 0,$D,$A,$B,$C,$X, 1,12,0xe8c7b756);
   216  &R0( 0,$C,$D,$A,$B,$X, 2,17,0x242070db);
   217  &R0( [all...]
/third_party/openssl/crypto/md5/asm/
  md5-586.pl
    37  %Ltmp1=("R0",&Np($C), "R1",&Np($C), "R2",&Np($C), "R3",&Np($D));
    39  0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, # R0
    57  sub R0 subroutine
    66  &comment("R0 $ki");
    79  &mov($tmp1,&Np($c)) if $pos < 1; # next tmp1 for R0
   212  &comment("R0 section");
   214  &R0(-2,$A,$B,$C,$D,$X, 0, 7,0xd76aa478);
   215  &R0( 0,$D,$A,$B,$C,$X, 1,12,0xe8c7b756);
   216  &R0( 0,$C,$D,$A,$B,$X, 2,17,0x242070db);
   217  &R0( [all...]
/third_party/selinux/libselinux/src/
  sha1.c
    52  // (R0+R1), R2, R3, R4 are the different operations used in SHA1
    53  #define R0(v,w,x,y,z,i) do { z += ((w&(x^y))^y) + blk0(i)+ 0x5A827999 + rol(v,5); w=rol(w,30); } while (0) macro
    91  R0(a,b,c,d,e, 0); R0(e,a,b,c,d, 1); R0(d,e,a,b,c, 2); R0(c,d,e,a,b, 3); in TransformFunction()
    92  R0(b,c,d,e,a, 4); R0(a,b,c,d,e, 5); R0(e,a,b,c,d, 6); R0( in TransformFunction() [all...]
/third_party/wpa_supplicant/wpa_supplicant-2.9/src/crypto/
  sha1-internal.c
   143  /* (R0+R1), R2, R3, R4 are the different operations used in SHA1 */
   144  #define R0(v,w,x,y,z,i) \ macro
   198  R0(a,b,c,d,e, 0); R0(e,a,b,c,d, 1); R0(d,e,a,b,c, 2); R0(c,d,e,a,b, 3); in SHA1Transform()
   199  R0(b,c,d,e,a, 4); R0(a,b,c,d,e, 5); R0(e,a,b,c,d, 6); R0( in SHA1Transform() [all...]
/third_party/wpa_supplicant/wpa_supplicant-2.9_standard/src/crypto/
  sha1-internal.c
   143  /* (R0+R1), R2, R3, R4 are the different operations used in SHA1 */
   144  #define R0(v,w,x,y,z,i) \ macro
   198  R0(a,b,c,d,e, 0); R0(e,a,b,c,d, 1); R0(d,e,a,b,c, 2); R0(c,d,e,a,b, 3); in SHA1Transform()
   199  R0(b,c,d,e,a, 4); R0(a,b,c,d,e, 5); R0(e,a,b,c,d, 6); R0( in SHA1Transform() [all...]
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/CodeGen/
  SafeStackLayout.cpp
   112  StackRegion R0 = R; in layoutObject() local
   113  R.Start = R0.End = Start; in layoutObject()
   114  Regions.insert(&R, R0); in layoutObject()
   118  StackRegion R0 = R; in layoutObject() local
   119  R0.End = R.Start = End; in layoutObject()
   120  Regions.insert(&R, R0); in layoutObject()
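Both hunks follow the same pattern: copy the region, cut it at the object's Start (or End) boundary, and keep the front piece alongside the shrunken original. A generic sketch of that interval split (illustrative C, not LLVM's StackRegion API):

struct region {
	unsigned start;
	unsigned end;   /* half-open: [start, end) */
};

/* Cut *r at 'at' (start < at < end): return the front piece [start, at)
 * and shrink *r to the remaining back piece [at, end). */
static struct region split_region_at(struct region *r, unsigned at)
{
	struct region front = { r->start, at };
	r->start = at;
	return front;
}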
/third_party/rust/crates/rustix/src/backend/linux_raw/
  conv.rs
    28  use super::reg::{raw_arg, ArgNumber, ArgReg, RetReg, R0};
   685  pub(super) unsafe fn ret(raw: RetReg<R0>) -> io::Result<()> {
   697  pub(super) unsafe fn ret_error(raw: RetReg<R0>) -> io::Errno {
   709  pub(super) unsafe fn ret_infallible(raw: RetReg<R0>) {
   721  pub(super) fn ret_c_int(raw: RetReg<R0>) -> io::Result<c::c_int> {
   728  pub(super) fn ret_c_uint(raw: RetReg<R0>) -> io::Result<c::c_uint> {
   736  pub(super) fn ret_u64(raw: RetReg<R0>) -> io::Result<u64> {
   743  pub(super) fn ret_usize(raw: RetReg<R0>) -> io::Result<usize> {
   755  pub(super) unsafe fn ret_usize_infallible(raw: RetReg<R0>) -> usize {
   774  pub(super) unsafe fn ret_owned_fd(raw: RetReg<R0>) [all...]
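All of these helpers decode the raw value left in the return register (RetReg<R0>). On Linux, raw syscalls report failure by returning -errno, i.e. a value in the last 4095 values of the register's range; everything else is a success value. A standalone C sketch of that convention (names are illustrative, not rustix's):

#include <stdint.h>

/* True if 'raw' falls in [-4095, -1] when reinterpreted as signed. */
static int raw_is_error(uintptr_t raw)
{
	return raw > (uintptr_t)-4096;
}

/* Returns 0 and stores the result on success, or a positive errno. */
static int raw_to_result(uintptr_t raw, uintptr_t *out)
{
	if (raw_is_error(raw))
		return (int)-(intptr_t)raw;
	*out = raw;
	return 0;
}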
/third_party/ffmpeg/libavcodec/arm/
  simple_idct_arm.S
    48  @@ R0-R3 are scratch regs, so no need to save them, but R0 contains the pointer to block
    53  @@ at this point, R0=block, other registers are free.
    55  @@ add 2 temporary variables in the stack: R0 and R14
    60  @@ sp+0 R0 (block)
    63  @@ at this point, R0=block, R14=&block[56], R12=__const_ptr_, R1-R11 free
    75  @@ at this point, R0=block, R14=&block[n], R12=__const_ptr_, R1=ROWr32[0], R2=ROWr32[1],
    87  @@ at this point, R0=block (temp), R1(free), R2=ROWr32[1], R3=ROWr32[2], R4=ROWr32[3],
   102  mul r0, r8, r7 @ R0=W1*ROWr16[1]=b0 (ROWr16[1] must be the second arg, to have the possibility to save 1 cycle)
   111  mlane r0, r9, r2, r0 @ R0 [all...]
/third_party/node/deps/openssl/openssl/crypto/poly1305/asm/
  poly1305-x86_64.pl
  2133  my ($R0,$R1,$R2,$R3,$R4, $S1,$S2,$S3,$S4) = map("%zmm$_",(16..24));
  2177  vmovdqu `16*0-64`($ctx),%x#$D0 # will become expanded ${R0}
  2188  vpermd $D0,$T2,$R0 # 00003412 -> 14243444
  2193  vmovdqa64 $R0,0x00(%rsp){%k2} # save in case $len%128 != 0
  2194  vpsrlq \$32,$R0,$T0 # 14243444 -> 01020304
  2219  vpmuludq $T0,$R0,$D0 # d0 = r0'*r0
  2227  vpmuludq $T1,$R0,$M1
  2242  vpmuludq $T2,$R0,$M2
  2251  vpmuludq $T3,$R0,$M3
  2262  vpmuludq $T4,$R0, [all...]
/third_party/openssl/crypto/poly1305/asm/
  poly1305-x86_64.pl
  2133  my ($R0,$R1,$R2,$R3,$R4, $S1,$S2,$S3,$S4) = map("%zmm$_",(16..24));
  2177  vmovdqu `16*0-64`($ctx),%x#$D0 # will become expanded ${R0}
  2188  vpermd $D0,$T2,$R0 # 00003412 -> 14243444
  2193  vmovdqa64 $R0,0x00(%rsp){%k2} # save in case $len%128 != 0
  2194  vpsrlq \$32,$R0,$T0 # 14243444 -> 01020304
  2219  vpmuludq $T0,$R0,$D0 # d0 = r0'*r0
  2227  vpmuludq $T1,$R0,$M1
  2242  vpmuludq $T2,$R0,$M2
  2251  vpmuludq $T3,$R0,$M3
  2262  vpmuludq $T4,$R0, [all...]
/third_party/skia/third_party/externals/libwebp/src/dsp/
  yuv_sse41.c
    47  const __m128i R0 = _mm_mulhi_epu16(*V0, k26149); in ConvertYUV444ToRGB_SSE41() local
    49  const __m128i R2 = _mm_add_epi16(R1, R0); in ConvertYUV444ToRGB_SSE41()
   135  __m128i R0, R1, R2, R3, G0, G1, G2, G3, B0, B1, B2, B3; in VP8YuvToRgb32_SSE41() local
   138  YUV444ToRGB_SSE41(y + 0, u + 0, v + 0, &R0, &G0, &B0); in VP8YuvToRgb32_SSE41()
   144  rgb0 = _mm_packus_epi16(R0, R1); in VP8YuvToRgb32_SSE41()
   157  __m128i R0, R1, R2, R3, G0, G1, G2, G3, B0, B1, B2, B3; in VP8YuvToBgr32_SSE41() local
   160  YUV444ToRGB_SSE41(y + 0, u + 0, v + 0, &R0, &G0, &B0); in VP8YuvToBgr32_SSE41()
   170  bgr4 = _mm_packus_epi16(R0, R1); in VP8YuvToBgr32_SSE41()
   185  __m128i R0, R1, R2, R3, G0, G1, G2, G3, B0, B1, B2, B3; in YuvToRgbRow_SSE41() local
   188  YUV420ToRGB_SSE41(y + 0, u + 0, v + 0, &R0, in YuvToRgbRow_SSE41()
   222  __m128i R0, R1, R2, R3, G0, G1, G2, G3, B0, B1, B2, B3; YuvToBgrRow_SSE41() local [all...]
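The k26149 constant in the first hit is the BT.601 V-to-R coefficient (about 1.596) in Q14 fixed point. A scalar sketch of the corresponding per-pixel math; the other constants below are the textbook BT.601 values written the same way, not values copied from libwebp, so treat this as an illustration of the idea only:

#include <stdint.h>

static uint8_t clamp8(int v)
{
	return (uint8_t)(v < 0 ? 0 : v > 255 ? 255 : v);
}

/* R component for one pixel from studio-range Y (16..235) and V (16..240):
 * R ~= 1.164*(Y-16) + 1.596*(V-128), folded into one Q14 expression. */
static uint8_t yuv_to_r(uint8_t y, uint8_t v)
{
	return clamp8(((19077 * y + 26149 * v) >> 14) - 223);
}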
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/ARM/
  ARMCallingConv.cpp
    24  static const MCPhysReg RegList[] = { ARM::R0, ARM::R1, ARM::R2, ARM::R3 }; in f64AssignAPCS()
    67  static const MCPhysReg HiRegList[] = { ARM::R0, ARM::R2 }; in f64AssignAAPCS()
    69  static const MCPhysReg ShadowRegList[] = { ARM::R0, ARM::R1 }; in f64AssignAAPCS()
    70  static const MCPhysReg GPRArgRegs[] = { ARM::R0, ARM::R1, ARM::R2, ARM::R3 }; in f64AssignAAPCS()
   119  static const MCPhysReg HiRegList[] = { ARM::R0, ARM::R2 }; in f64RetAssign()
   156  static const MCPhysReg RRegList[] = { ARM::R0, ARM::R1, ARM::R2, ARM::R3 };
/third_party/ffmpeg/libswscale/ppc/
  yuv2rgb_altivec.c
   315  vector signed short R0, G0, B0; \
   397  R0 = vec_add(Y0, vx0); \
   404  R = vec_packclp(R0, R1); \
   410  R0 = vec_add(Y2, vx0); \
   416  R = vec_packclp(R0, R1); \
   483  vector signed short R0, G0, B0, R1, G1, B1; in altivec_uyvy_rgb32() local
   502  cvtyuvtoRGB(c, Y, U, V, &R0, &G0, &B0); in altivec_uyvy_rgb32()
   515  R = vec_packclp(R0, R1); in altivec_uyvy_rgb32()
   648  vector signed short R0, G0, B0, R1, G1, B1; in yuv2packedX_altivec() local
   711  cvtyuvtoRGB(c, Y0, U0, V0, &R0, in yuv2packedX_altivec() [all...]
/third_party/node/deps/openssl/config/archs/BSD-x86/asm/crypto/md5/
  md5-586.S
    29  # R0 section
    32  # R0 0
    42  # R0 1
    52  # R0 2
    62  # R0 3
    72  # R0 4
    82  # R0 5
    92  # R0 6
   102  # R0 7
   112  # R0 [all...]