/third_party/node/deps/v8/src/codegen/arm64/ |
assembler-arm64.cc | 392 // As a crutch to avoid having to add manual Align calls wherever we use a in GetCode() 393 // raw workflow to create Code objects (mostly in tests), add another Align in GetCode() 838 void Assembler::add(const Register& rd, const Register& rn, in add() function in v8::internal::Assembler 1868 // Moves involving the stack pointer are encoded as add immediate with in mov() 1872 add(rd, rm, 0); in mov() 3058 V(add, NEON_ADD, vd.IsVector() || vd.Is1D()) \ 3647 // add/sub wsp, <Wn>, <Wm> [, LSL #0-3 ] in AddSub() 3648 // add/sub <Wd>, wsp, <Wm> [, LSL #0-3 ] in AddSub() 3649 // add/sub wsp, wsp, <Wm> [, LSL #0-3 ] in AddSub() 3652 // extended register mode, and emit an add/su in AddSub() [all...] |
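The mov() lines matched above encode a real AArch64 quirk worth spelling out: xzr and sp share register code 31, and the usual mov alias (orr with xzr) would read the zero register, so a move to or from sp is emitted as `add rd, rm, #0` instead. A minimal sketch of that dispatch, with a hypothetical Reg type and printf standing in for V8's emitter:

```cpp
#include <cstdio>
#include <string>

// Hypothetical register model for illustration only. AArch64 gives xzr
// and sp the same encoding (31), so "is this sp?" has to travel out of
// band, much as V8's Register type tracks it.
struct Reg { int code; bool is_sp; };

std::string Name(Reg r) {
  if (r.code == 31) return r.is_sp ? "sp" : "xzr";
  return "x" + std::to_string(r.code);
}

// Sketch of the rule the mov() snippet above states: register 31 in the
// orr encoding reads as xzr, so a move involving the stack pointer must
// instead be emitted as an add-immediate with #0.
void EmitMov(Reg rd, Reg rm) {
  if (rd.is_sp || rm.is_sp) {
    std::printf("add %s, %s, #0\n", Name(rd).c_str(), Name(rm).c_str());
  } else {
    std::printf("orr %s, xzr, %s\n", Name(rd).c_str(), Name(rm).c_str());
  }
}

int main() {
  EmitMov({31, true}, {0, false});   // mov sp, x0  =>  add sp, x0, #0
  EmitMov({0, false}, {1, false});   // mov x0, x1  =>  orr x0, xzr, x1
}
```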
/third_party/node/deps/v8/src/codegen/riscv64/ |
assembler-riscv64.cc | 243 // As a crutch to avoid having to add manual Align calls wherever we use a in GetCode() 244 // raw workflow to create Code objects (mostly in tests), add another Align in GetCode() 1635 void Assembler::add(Register rd, Register rs1, Register rs2) { in add() function in v8::internal::Assembler 3268 // Positive number, but overflow because of the add 0x800 in RV_li() 3299 add(rd, rd, temp_reg); in RV_li() 3304 // Build upper 32 bits first in rd. Divide lower 32 bits parts and add in RV_li() 3305 // parts to the upper part by doing shift and add. in RV_li() 3401 // Positive number, but overflow because of the add 0x800 in li_estimate() 3432 // Build upper 32 bits first in rd. Divide lower 32 bits parts and add in li_estimate() 3433 // parts to the upper part by doing shift and add in li_estimate() [all...] |
macro-assembler-riscv64.h | 1346 add(scratch2, scratch2, in GenerateSwitchTable()
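The RV_li and li_estimate comments matched above (assembler-riscv64.cc) compress a standard RISC-V trick: `addi` sign-extends its 12-bit immediate, so splitting a constant into lui+addi requires rounding the upper part with +0x800, which is also where the "overflow because of the add 0x800" remark comes from; 64-bit constants are then built by materializing the upper 32 bits and shift-and-adding the lower parts. A hedged sketch of just the 32-bit split, with a hypothetical MaterializeInt32 helper and printf in place of an assembler:

```cpp
#include <cstdint>
#include <cstdio>

// addi sign-extends its 12-bit immediate, so when bit 11 of the low part
// is set the split must round the upper part up by one; that is the
// "add 0x800" the RV_li comments above refer to, and the step that can
// overflow for values near the top of the positive range.
void MaterializeInt32(int32_t value) {
  int64_t hi20 = ((int64_t)value + 0x800) >> 12;  // 64-bit math avoids UB on wrap
  int64_t lo12 = value - (hi20 << 12);            // always in [-2048, 2047]
  if (hi20 != 0)
    std::printf("lui  rd, 0x%llx\n", (unsigned long long)(hi20 & 0xfffff));
  std::printf("addi rd, %s, %lld\n", hi20 != 0 ? "rd" : "zero", (long long)lo12);
}

int main() {
  MaterializeInt32(0x12345fff);  // lui rd, 0x12346; addi rd, rd, -1
}
```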
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Analysis/ |
ScalarEvolution.cpp | 215 MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden, 1532 // Try to prove away overflow by looking at "nearby" add recurrences. A 1592 // Give up if we don't already have the add recurrence we need because in proveNoWrapByVaryingStart() 1593 // actually constructing an add recurrence is relatively expensive. in proveNoWrapByVaryingStart() 1885 // (zext (add (shl X, C1), C2)), for instance, (zext (5 + (4 * X))). in getZeroExtendExpr() 2254 /// that would form an add expression like this: 2262 /// and add 13 + A*B*29 to AccumulatedConstant. 2283 // Iterate over the add operands. They are sorted, with constants first. in CollectAddOperandsWithScales() 2301 // A multiplication of a constant with another add; recurse. in CollectAddOperandsWithScales() 2412 /// Get a canonical add expressio [all...] |
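The CollectAddOperandsWithScales comment matched above describes folding an add expression's operands into a map of (operand, scale) pairs while sweeping constants into an accumulator. A toy model of that idea, not LLVM's API: terms here are flat (the real routine recurses into nested multiplies, which is how a term like B*(... + 29) under scale A contributes A*B*29 to the accumulated constant), and all names are purely illustrative:

```cpp
#include <cstdint>
#include <cstdio>
#include <map>
#include <string>
#include <vector>

// Toy term standing in for a SCEV node: either a bare constant (empty
// name) or a symbolic value with a constant scale.
struct Term {
  std::string sym;   // "" means a plain constant
  int64_t scale;     // the constant, or the symbol's multiplier
};

// Flat sketch of the collect-with-scales idea: fold constants into one
// accumulator and merge symbolic operands by summing scales, so
// 5 + A*3 + 13 + A*7 collapses to 18 + A*10.
int64_t CollectWithScales(const std::vector<Term>& ops,
                          std::map<std::string, int64_t>* scales) {
  int64_t accumulated_constant = 0;
  for (const Term& t : ops) {
    if (t.sym.empty())
      accumulated_constant += t.scale;  // constant operand
    else
      (*scales)[t.sym] += t.scale;      // scaled symbolic operand
  }
  return accumulated_constant;
}

int main() {
  std::map<std::string, int64_t> scales;
  int64_t c = CollectWithScales(
      {{"", 5}, {"A", 3}, {"", 13}, {"A", 7}}, &scales);
  std::printf("%lld + A*%lld\n", (long long)c, (long long)scales["A"]);
}
```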
/third_party/vixl/test/aarch64/ |
test-assembler-aarch64.cc | 7739 __ add(xzr, x0, x1); 7740 __ add(xzr, x1, xzr); 7741 __ add(xzr, xzr, x1); 10895 AtomicMemoryLoadSignature loads[] = MAKE_LOADS(add); 10896 AtomicMemoryStoreSignature stores[] = MAKE_STORES(add); 10897 AtomicMemoryLoadSignature b_loads[] = MAKE_B_LOADS(add); 10898 AtomicMemoryStoreSignature b_stores[] = MAKE_B_STORES(add); 10899 AtomicMemoryLoadSignature h_loads[] = MAKE_H_LOADS(add); 10900 AtomicMemoryStoreSignature h_stores[] = MAKE_H_STORES(add); 11240 V(DEF, add) \ [all...] |
/third_party/vixl/src/aarch64/ |
macro-assembler-aarch64.h | 287 // for example if we decide to add nops between the veneers. 695 // Add(x0, x0, 0x1f7de) => movz x16, 0xfbef; add x0, x0, x16, lsl #1. 701 kLimitShiftForSP, // Limit pre-shift for add/sub extend use. 1869 V(DEF, MASM_PRE##add, ASM_PRE##add) \ 2216 // If the sp is an operand, add #0 is emitted, otherwise, orr #0. in Mov() 2850 V(add, Add) \ 3221 V(add, Add) \ 3657 add(zd, zn, zm); in Add() 8121 // Will add register [all...] |
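The header comment matched above, `Add(x0, x0, 0x1f7de) => movz x16, 0xfbef; add x0, x0, x16, lsl #1`, shows the macro-assembler's fallback for immediates the add encoding cannot hold: pre-shift out trailing zero bits, materialize the reduced value in a scratch register, and fold the shift back in with the shifted-register form. A sketch under stated assumptions (printf emitter, x16 as scratch, reduced value assumed to fit one movz; the real code handles wider values with movz/movk sequences):

```cpp
#include <cstdint>
#include <cstdio>

// AArch64 add-immediate holds a 12-bit value, optionally shifted left 12.
bool IsAddImmEncodable(uint64_t imm) {
  return imm < (1u << 12) || ((imm & 0xfffu) == 0 && imm < (1ull << 24));
}

// Shift out trailing zero bits, materialize the reduced value in the
// scratch register, and recover the shift via the shifted-register add.
void MacroAdd(const char* rd, const char* rn, uint64_t imm) {
  if (IsAddImmEncodable(imm)) {
    std::printf("add  %s, %s, #0x%llx\n", rd, rn, (unsigned long long)imm);
    return;
  }
  int shift = 0;
  while (((imm >> shift) & 1) == 0) shift++;  // count trailing zeros
  uint64_t reduced = imm >> shift;
  std::printf("movz x16, #0x%llx\n", (unsigned long long)reduced);
  std::printf("add  %s, %s, x16, lsl #%d\n", rd, rn, shift);
}

int main() {
  MacroAdd("x0", "x0", 0x1f7de);  // movz x16, #0xfbef; add x0, x0, x16, lsl #1
}
```

The kLimitShiftForSP match above fits this picture: presumably, when sp is an operand the shifted-register form is unavailable and the extended-register form caps the shift at 4, so the pre-shift must be limited in that case.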
/third_party/icu/icu4c/source/common/ |
locid.cpp | 731 rawIndexes[i] = strings->add(aliasTo, status); in readAlias()
|
unames.cpp | 1742 /* add each UChar to the USet */ in charSetToUSet() 1745 sa->add(sa->set, us[i]); in charSetToUSet()
|
/third_party/node/deps/openssl/config/archs/linux-armv4/asm_avx2/crypto/aes/ |
bsaes-armv7.S | 96 add r6,r6,#.LM0ISR-_bsaes_decrypt8 1105 add r12, #96 @ size of bit-sliced key schedule 1125 add r12, r3, #248 @ pass key schedule 1127 add r4, r3, #248 1150 add r4, r3, #248 1194 add r4, r3, #248 1337 add sp, #0x10 @ add sp,r9,#0x10 is no good for thumb 1361 add r12, #96 @ size of bit-sliced key schedule 1374 add r [all...] |
/third_party/node/deps/openssl/config/archs/linux-armv4/asm/crypto/aes/ |
bsaes-armv7.S | 96 add r6,r6,#.LM0ISR-_bsaes_decrypt8 1105 add r12, #96 @ size of bit-sliced key schedule 1125 add r12, r3, #248 @ pass key schedule 1127 add r4, r3, #248 1150 add r4, r3, #248 1194 add r4, r3, #248 1337 add sp, #0x10 @ add sp,r9,#0x10 is no good for thumb 1361 add r12, #96 @ size of bit-sliced key schedule 1374 add r [all...] |
/third_party/node/ |
configure.py | 142 help='add gdb support') 1399 now a runtime option of Node.js. Run `node --use-largepages` or add 1757 locs.add('root') # must have root
|
/third_party/node/deps/icu-small/source/common/ |
locid.cpp | 731 rawIndexes[i] = strings->add(aliasTo, status); in readAlias() 1662 // do not add "" or duplicate data to variants 1668 // do not add "" or duplicate data to variants
|
unames.cpp | 1742 /* add each char16_t to the USet */ in charSetToUSet() 1745 sa->add(sa->set, us[i]); in charSetToUSet()
|
/third_party/icu/icu4c/source/test/intltest/ |
convtest.cpp | 575 expected.add(0, cpLimit-1); in TestGetUnicodeSet2() 1633 // TODO change with Jitterbug 2401, then add a similar call for toUnicode too in FromUnicodeCase()
|
/third_party/littlefs/scripts/ |
plot.py | 533 ks.add(tuple(r.get(k, '') for k in by))
|
test.py | 808 children.add(proc)
|
/third_party/node/deps/v8/src/codegen/ppc/ |
assembler-ppc.h | 884 void add(Register dst, Register src1, Register src2, OEBit s = LeaveOE,
|
/third_party/node/deps/openssl/openssl/crypto/sha/asm/ |
sha1-mb-x86_64.pl | 1475 add \$64,%rsp
|
/third_party/node/deps/openssl/openssl/crypto/whrlpool/asm/ |
wp-mmx.pl | 200 &add ("esi",1);
|
/third_party/node/deps/openssl/openssl/crypto/aes/asm/ |
vpaes-ppc.pl | 1119 addi $out, $out, 16 # add \$16, %rdx 1140 addi $out, $out, -16 # add \$-16, %rdx 1261 # add in smeared stuff 1321 addi $out, $out, 16 # add \$16, %rdx 1330 addi r8, r8, -16 # add \$-16, %r8 1378 addi $out, $out, -16 # add \$-16, %rdx 1381 addi r8, r8, -16 # add \$-16, %r8 1429 addi r9, r9, 6 # add \$5,%eax 1507 addi r9, r9, 6 # add \$5,%eax 1511 add [all...] |
/third_party/python/Lib/test/support/ |
__init__.py | 789 # add GC header size 1621 expected.add(name) 1745 Also, add a cleanup procedure to 'test_instance' to restore
|
/third_party/python/Lib/ |
subprocess.py | 557 # _after_ kill() is required to collect that and add it 1882 fds_to_keep.add(errpipe_write)
|
/third_party/python/Lib/asyncio/ |
base_events.py | 537 self._asyncgens.add(agen) 1900 # All other places just add them to ready.
|
/third_party/openssl/crypto/aes/asm/ |
vpaes-ppc.pl | 1119 addi $out, $out, 16 # add \$16, %rdx 1140 addi $out, $out, -16 # add \$-16, %rdx 1261 # add in smeared stuff 1321 addi $out, $out, 16 # add \$16, %rdx 1330 addi r8, r8, -16 # add \$-16, %r8 1378 addi $out, $out, -16 # add \$-16, %rdx 1381 addi r8, r8, -16 # add \$-16, %r8 1429 addi r9, r9, 6 # add \$5,%eax 1507 addi r9, r9, 6 # add \$5,%eax 1511 add [all...] |
/third_party/vk-gl-cts/external/vulkancts/modules/vulkan/compute/ |
vktComputeCooperativeMatrixTests.cpp | 797 programCollection.glslSources.add("test") << glu::ComputeSource(css.str()) << buildOptions; in initPrograms() 1743 { TT_ADD, "add"}, in createCooperativeMatrixTestsInternal()
|