| /third_party/openssl/crypto/sha/asm/ |
| sha512-armv4.pl |
      5  # this file except in compliance with the License. You can obtain a copy
     53  # h[0-7], namely with most significant dword at *lower* address, which
    510  moveq pc,lr   @ be binary compatible with V4, yet
    511  bx lr         @ interoperable with Thumb ISA:-)
    668  $code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm;  # make it possible to compile with -march=armv4

| sha256-armv4.pl |
      5  # this file except in compliance with the License. You can obtain a copy
    302  moveq pc,lr   @ be binary compatible with V4, yet
    303  bx lr         @ interoperable with Thumb ISA:-)
    737  s/\bbx\s+lr\b/.word\t0xe12fff1e/go;  # make it possible to compile with -march=armv4

| sha1-armv4-large.pl |
      5  # this file except in compliance with the License. You can obtain a copy
     32  # with recurring code in two private functions;
     43  # instructions with 3 arguments, no [fixed] rotate, addressing
    307  moveq pc,lr   @ be binary compatible with V4, yet
    308  bx lr         @ interoperable with Thumb ISA:-)
    730  # this fix-up provides Thumb encoding in conjunction with INST
    747  s/\bbx\s+lr\b/.word\t0xe12fff1e/o;  # make it possible to compile with -march=armv4
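The substitutions quoted at lines 668, 737, and 747 above are the ARMv4 compatibility fix-up these perlasm scripts apply to their own output: `bx lr` keeps the return interoperable with Thumb, but it does not exist on ARMv4, so the scripts replace it with its literal instruction encoding (`.word 0xe12fff1e`, which is the machine code for `bx lr`) so the file still assembles with `-march=armv4`. A minimal sketch of that post-processing step, written in Python rather than the scripts' own Perl, purely for illustration:

```python
import re

# Illustrative re-implementation of the substitution quoted above
# (the OpenSSL scripts do it in Perl: s/\bbx\s+lr\b/.word\t0xe12fff1e/).
# 0xe12fff1e is the ARM encoding of "bx lr", so emitting it as a literal
# .word preserves behaviour on newer cores while letting the assembler
# accept the file when targeting ARMv4, which has no bx instruction.
def armv4_fixup(asm: str) -> str:
    return re.sub(r"\bbx\s+lr\b", ".word\t0xe12fff1e", asm)

print(armv4_fixup("teq r0, #0\n\tmoveq pc, lr\n\tbx lr\n"))
```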
|
| /third_party/skia/third_party/externals/libjpeg-turbo/simd/i386/ |
| jfdctflt-sse.asm |
     11  ; This file should be assembled with NASM (Netwide Assembler),
     12  ; can *not* be assembled with Microsoft's MASM or any compatible

| jfdctfst-mmx.asm |
     11  ; This file should be assembled with NASM (Netwide Assembler),
     12  ; can *not* be assembled with Microsoft's MASM or any compatible

| jfdctfst-sse2.asm |
     11  ; This file should be assembled with NASM (Netwide Assembler),
     12  ; can *not* be assembled with Microsoft's MASM or any compatible

| jidctflt-3dn.asm |
     11  ; This file should be assembled with NASM (Netwide Assembler),
     12  ; can *not* be assembled with Microsoft's MASM or any compatible

| jidctfst-mmx.asm |
     11  ; This file should be assembled with NASM (Netwide Assembler),
     12  ; can *not* be assembled with Microsoft's MASM or any compatible

| jidctfst-sse2.asm |
     11  ; This file should be assembled with NASM (Netwide Assembler),
     12  ; can *not* be assembled with Microsoft's MASM or any compatible

| jidctint-avx2.asm |
     11  ; This file should be assembled with NASM (Netwide Assembler),
     12  ; can *not* be assembled with Microsoft's MASM or any compatible
|
| /third_party/skia/third_party/externals/libjpeg-turbo/simd/x86_64/ |
| jfdctfst-sse2.asm |
     11  ; This file should be assembled with NASM (Netwide Assembler),
     12  ; can *not* be assembled with Microsoft's MASM or any compatible

| jidctflt-sse2.asm |
     12  ; This file should be assembled with NASM (Netwide Assembler),
     13  ; can *not* be assembled with Microsoft's MASM or any compatible

| jidctfst-sse2.asm |
     12  ; This file should be assembled with NASM (Netwide Assembler),
     13  ; can *not* be assembled with Microsoft's MASM or any compatible
|
| /third_party/skia/third_party/externals/harfbuzz/src/ |
| hb-aat-layout-kerx-table.hh |
    917  hb_sanitize_with_object_t with (&c->sanitizer, i < count - 1 ? st : (const SubTable *) nullptr);   in apply()
    957  hb_sanitize_with_object_t with (c, i < count - 1 ? st : (const SubTable *) nullptr);   in sanitize()
|
| /third_party/python/Objects/ |
| bytearrayobject.c |
   1208  Return a bytearray with the given prefix string removed if present.
   1210  If the bytearray starts with the prefix string, return
   1240  Return a bytearray with the given suffix string removed if present.
   1242  If the bytearray ends with the suffix string and that suffix is not
   1276  Return a copy with each character mapped by the given translation table.
   1407  Return a copy with all occurrences of substring old replaced by new.
   1711  "can't extend bytearray with %.100s",   in bytearray_extend()
   1962  The encoding with which to decode the bytearray.
   1967  as well as any other name registered with codecs.register_error that
   2107  /* use str based reduction for backwards compatibility with Pytho   in _common_reduce() [all...]
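The docstrings quoted from bytearrayobject.c describe the removeprefix/removesuffix semantics: return a copy with the prefix (or suffix) removed when it is present, otherwise an unchanged copy. A rough pure-Python sketch of the behaviour those docstrings describe (illustration only, not the C implementation):

```python
def removeprefix_sketch(data: bytearray, prefix: bytes) -> bytearray:
    # "If the bytearray starts with the prefix string, return" the remainder;
    # otherwise hand back an unchanged copy.
    if data.startswith(prefix):
        return bytearray(data[len(prefix):])
    return bytearray(data)

def removesuffix_sketch(data: bytearray, suffix: bytes) -> bytearray:
    # The suffix is only stripped when it is present and not empty.
    if suffix and data.endswith(suffix):
        return bytearray(data[:-len(suffix)])
    return bytearray(data)

assert removeprefix_sketch(bytearray(b"spam-eggs"), b"spam-") == bytearray(b"eggs")
assert removesuffix_sketch(bytearray(b"run.py"), b".py") == bytearray(b"run")
```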
| /third_party/zlib/ |
| configure |
    447  echo Building shared library $SHAREDLIBV with $CC. | tee -a configure.log
    463  echo Building static library $STATICLIB version $VER with $CC. | tee -a configure.log
    630  # (using stdarg or not), with or without "n" (proving size of buffer), and with or without a
    631  # return value. The most secure result is vsnprintf() with a return value. snprintf() with a
    870  # update Makefile with the configure results
    903  # create zlib.pc with the configure results
|
| /third_party/vulkan-loader/loader/ |
| unknown_ext_chain_gas_x86.S |
      7  # you may not use this file except in compliance with the License.
     99  mov [esp + 4], ecx   # Overwrite the wrapped VkPhysicalDevice with the unwrapped one (on the stack)

| unknown_ext_chain_masm.asm |
      7  ; you may not use this file except in compliance with the License.
     80  mov [esp + 4], ecx   ; Overwrite the wrapped VkPhysicalDevice with the unwrapped one (on the stack)
|
| /kernel/linux/linux-5.10/arch/x86/crypto/ |
| aesni-intel_avx-x86_64.S |
     10  # Redistribution and use in source and binary forms, with or without
     19  # documentation and/or other materials provided with the
     73  ## AAD padded to 128 bits with 0
     90  ## AAD Format with 32-bit Sequence Number
    107  ## AAD Format with 64-bit Extended Sequence Number
   1217  vpxor TMP1(%rsp), \XMM1, \XMM1   # combine GHASHed value with the corresponding ciphertext
   1770  # (from Security Association) concatenated with 8 byte
   1772  # concatenated with 0x00000001. 16-byte aligned pointer. */
   1906  vpslldq $4, \GH, \GH   # shift-L GH 1 DW (Shift-L 1-DW to obtain result with no shifts)
   2168  vpxor TMP1(%rsp), \XMM1, \XMM1   # combine GHASHed value with [all...]
| /kernel/linux/linux-6.6/arch/x86/crypto/ |
| aesni-intel_avx-x86_64.S |
     10  # Redistribution and use in source and binary forms, with or without
     19  # documentation and/or other materials provided with the
     73  ## AAD padded to 128 bits with 0
     90  ## AAD Format with 32-bit Sequence Number
    107  ## AAD Format with 64-bit Extended Sequence Number
   1185  vpxor TMP1(%rsp), \XMM1, \XMM1   # combine GHASHed value with the corresponding ciphertext
   1738  # (from Security Association) concatenated with 8 byte
   1740  # concatenated with 0x00000001. 16-byte aligned pointer. */
   1874  vpslldq $4, \GH, \GH   # shift-L GH 1 DW (Shift-L 1-DW to obtain result with no shifts)
   2136  vpxor TMP1(%rsp), \XMM1, \XMM1   # combine GHASHed value with [all...]
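The header comments quoted in both aesni-intel_avx-x86_64.S entries describe the GCM inputs as used for IPsec ESP: the AAD is the SPI followed by either a 32-bit sequence number or a 64-bit extended sequence number, padded to 128 bits with zeros, and the initial counter block is the 4-byte salt from the Security Association concatenated with the 8-byte IV and the constant 0x00000001 (the layout defined by RFC 4106). A hedged sketch of building those buffers; the helper names are invented for the sketch, not taken from the kernel source:

```python
import struct

def esp_gcm_aad(spi: int, seq: int, extended_seq: bool = False) -> bytes:
    """AAD = SPI || sequence number, zero-padded to a 16-byte block."""
    if extended_seq:
        aad = struct.pack(">IQ", spi, seq)   # 4-byte SPI + 64-bit extended seq
    else:
        aad = struct.pack(">II", spi, seq)   # 4-byte SPI + 32-bit seq
    return aad + b"\x00" * (-len(aad) % 16)  # "AAD padded to 128 bits with 0"

def esp_gcm_icb(salt: bytes, iv: bytes) -> bytes:
    """Initial counter block = 4-byte salt || 8-byte IV || 0x00000001."""
    assert len(salt) == 4 and len(iv) == 8
    return salt + iv + b"\x00\x00\x00\x01"

print(esp_gcm_aad(0x11223344, 1).hex())
print(esp_gcm_icb(b"\x00" * 4, b"\x01" * 8).hex())
```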
| /third_party/node/deps/v8/src/codegen/x64/ |
| macro-assembler-x64.cc |
    174  void TurboAssembler::CompareRoot(Register with, RootIndex index) {   in CompareRoot() argument
    178  cmp_tagged(with, RootAsOperand(index));   in CompareRoot()
    181  cmpq(with, RootAsOperand(index));   in CompareRoot()
    185  void TurboAssembler::CompareRoot(Operand with, RootIndex index) {   in CompareRoot() argument
    187  DCHECK(!with.AddressUsesRegister(kScratchRegister));   in CompareRoot()
    191  cmp_tagged(with, kScratchRegister);   in CompareRoot()
    194  cmpq(with, kScratchRegister);   in CompareRoot()
    217  // Clear the top two bytes (which may include metadata). Must be in sync with   in UnpackMapWord()
    370  // Clobber clobbered input registers when running with the debug-code flag   in RecordWriteField()
    660  // Clobber clobbered registers when running with th   in CallRecordWriteStub() [all...]
| /third_party/node/deps/openssl/openssl/crypto/aes/asm/ |
| vpaes-armv8.pl |
      5  # this file except in compliance with the License. You can obtain a copy
    736  // schedule, but with more smearing. The long, high side is
    752  ins v6.d[0], v4.d[0]   // vmovhlps %xmm4, %xmm6, %xmm6  # clobber low side with zeros
    774  // schedule, but with an additional "low side" in
    872  ins v6.d[0], v1.d[0]   // vmovhlps %xmm1, %xmm6, %xmm6  # clobber low side with zeros
    970  // xor with 0x63
    975  // xor with 0x63
   1131  eor v7.16b, v7.16b, v0.16b   // xor with ivec
   1165  eor v0.16b, v0.16b, v6.16b   // xor with ivec
   1175  eor v0.16b, v0.16b, v6.16b   // xor with ive [all...]
| /third_party/openssl/crypto/aes/asm/ |
| vpaes-armv8.pl |
      5  # this file except in compliance with the License. You can obtain a copy
    753  // schedule, but with more smearing. The long, high side is
    769  ins v6.d[0], v4.d[0]   // vmovhlps %xmm4, %xmm6, %xmm6  # clobber low side with zeros
    791  // schedule, but with an additional "low side" in
    891  ins v6.d[0], v1.d[0]   // vmovhlps %xmm1, %xmm6, %xmm6  # clobber low side with zeros
    989  // xor with 0x63
    994  // xor with 0x63
   1150  eor v7.16b, v7.16b, v0.16b   // xor with ivec
   1184  eor v0.16b, v0.16b, v6.16b   // xor with ivec
   1194  eor v0.16b, v0.16b, v6.16b   // xor with ive [all...]
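The "xor with ivec" comments in both vpaes-armv8.pl entries are the CBC chaining step: the IV (or the previous ciphertext block) is xored with the data either before or after the block cipher, depending on direction. A minimal sketch of that chaining for the encrypt direction, assuming a generic 16-byte block cipher; `encrypt_block` is a placeholder callable, not part of vpaes:

```python
def cbc_encrypt(blocks, iv, encrypt_block):
    """CBC: each plaintext block is xored with the running IV before the
    block cipher; the resulting ciphertext becomes the next IV."""
    out, chain = [], iv
    for block in blocks:                                     # 16-byte blocks
        mixed = bytes(a ^ b for a, b in zip(block, chain))   # "xor with ivec"
        chain = encrypt_block(mixed)
        out.append(chain)
    return out

# Toy usage with an identity "cipher", just to show the chaining:
ct = cbc_encrypt([b"A" * 16, b"B" * 16], b"\x00" * 16, lambda b: b)
print([c.hex() for c in ct])
```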
| /kernel/linux/linux-5.10/arch/arm/boot/compressed/ |
| head.S |
    201  * These 7 nops along with the 1 nop immediately below for
    206  * with these 8 nops filling exactly 32 bytes, things still
    335  * r4 = final kernel address (possibly with LSB set)
    345  * dtb data will get relocated along with the kernel if necessary.
    511  * r4 = final kernel address (possibly with LSB set)
    535  * Bump to the next 256-byte boundary with the size of
    555  * Call __hyp_set_vectors with the new address so that we
    579  * Calling dbgkc will help with the printing of this
    611  * r4 = kernel execution address (possibly with LSB set)
    879  * (This may be the case if running from flash or with randomizatio [all...]
| /kernel/linux/linux-6.6/arch/arm/boot/compressed/ |
| head.S |
    191  * These 7 nops along with the 1 nop immediately below for
    196  * with these 8 nops filling exactly 32 bytes, things still
    354  * r4 = final kernel address (possibly with LSB set)
    364  * dtb data will get relocated along with the kernel if necessary.
    451  * r4 = final kernel address (possibly with LSB set)
    475  * Bump to the next 256-byte boundary with the size of
    495  * Call __hyp_set_vectors with the new address so that we
    519  * Calling dbgkc will help with the printing of this
    551  * r4 = kernel execution address (possibly with LSB set)
   1079  /* this conflicts with th [all...]