/third_party/ffmpeg/libavcodec/aarch64/ |
H A D | fft_neon.S |
    48 fadd v5.2s, v2.2s, v3.2s // i2+i3,r2+r3
    51 fadd v0.2s, v4.2s, v5.2s
    52 fsub v2.2s, v4.2s, v5.2s
    69 fadd v5.2s, v18.2s, v19.2s // r6+r7,i6+i7
    76 ext v6.8b, v4.8b, v5.8b, #4
    77 ext v7.8b, v5.8b, v4.8b, #4
    92 fadd v7.2s, v4.2s, v5.2s
    98 fadd v5.2s, v25.2s, v24.2s
    101 fsub v17.2s, v1.2s, v5.2s
    104 fadd v1.2s, v1.2s, v5 [all...]
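The fadd/fsub pairs in the fft_neon.S hits above are FFT butterflies: each stage replaces a pair of complex values, packed as {re, im} in a vector register, with their sum and difference. A minimal scalar sketch of that step (plain C++, not the NEON scheduling itself):

    #include <complex>

    // One radix-2 butterfly; the NEON code does the same with fadd/fsub on
    // packed {re, im} pairs held in vector registers.
    static inline void butterfly(std::complex<float>& a, std::complex<float>& b) {
        const std::complex<float> sum  = a + b;   // fadd
        const std::complex<float> diff = a - b;   // fsub
        a = sum;
        b = diff;
    }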
H A D | hpeldsp_neon.S |
    36 ld1 {v5.16B}, [x12], x2
    37 urhadd v1.16B, v1.16B, v5.16B
    84 ld1 {v5.16B}, [x0]
    86 urhadd v3.16B, v3.16B, v5.16B
    98 ld1 {v5.16B}, [x0]
    100 urhadd v3.16B, v3.16B, v5.16B
    112 ld1 {v4.16B, v5.16B}, [x1], x2
    115 ext v5.16B, v4.16B, v5.16B, #1
    118 uaddl v18.8H, v4.8B, v5 [all...]
H A D | mdct_neon.S |
    50 fmul v5.2s, v17.2s, v3.2s
    52 fadd v5.2s, v5.2s, v7.2s
    64 st2 {v4.s,v5.s}[0], [x6]
    65 st2 {v4.s,v5.s}[1], [x8]
    68 st2 {v4.s,v5.s}[0], [x6]
    69 st2 {v4.s,v5.s}[1], [x8]
    99 fmul v5.2s, v20.2s, v19.2s
    105 fadd v5.2s, v5 [all...]
H A D | vp9itxfm_neon.S |
    265 mov v5.16b, v4.16b
    272 ld1 {v4.4h,v5.4h,v6.4h,v7.4h}, [x2]
    277 sshr v5.4h, v5.4h, #2
    282 \txfm1\()4 v4, v5, v6, v7
    286 transpose_4x4H v4, v5, v6, v7, v16, v17, v18, v19
    288 \txfm2\()4 v4, v5, v6, v7
    294 srshr v5.4h, v5.4h, #4
    299 uaddw v5 [all...]
H A D | sbrdsp_neon.S |
    77 movi v5.4S, #1<<7, lsl #24
    79 eor v1.16B, v1.16B, v5.16B
    83 eor v3.16B, v3.16B, v5.16B
    86 eor v1.16B, v1.16B, v5.16B
    89 eor v3.16B, v3.16B, v5.16B
    199 fmul v5.4S, v2.4S, v0.4S
    201 faddp v5.4S, v5.4S, v5.4S
    203 faddp v5 [all...]
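In the sbrdsp_neon.S hits, movi v5.4S, #1<<7, lsl #24 builds the constant 0x80000000 in every 32-bit lane, and the following eor instructions XOR it into packed floats, which flips the IEEE-754 sign bit, i.e. negates four samples at once. A scalar illustration of the same trick (hypothetical helper, not FFmpeg code):

    #include <cstdint>
    #include <cstring>

    // Negate a float by XOR-ing its sign bit, as the movi/eor pair does per lane.
    static inline float negate_via_sign_bit(float x) {
        uint32_t bits;
        std::memcpy(&bits, &x, sizeof bits);   // type-pun without UB
        bits ^= 0x80000000u;                   // flip the IEEE-754 sign bit
        std::memcpy(&x, &bits, sizeof x);
        return x;                              // equals -x (also flips the sign of +-0 and NaN)
    }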
H A D | me_cmp_neon.S |
    36 ld1 {v5.16b}, [x2], x3 // load pix2
    41 uabal v16.8h, v1.8b, v5.8b // absolute difference accumulate
    42 uabal2 v17.8h, v1.16b, v5.16b
    113 uaddl v30.8h, v4.8b, v5.8b // pix3 + pix3+1 0..7
    114 uaddl2 v31.8h, v4.16b, v5.16b // pix3 + pix3+1 8..15
    128 uaddl2 v5.8h, v18.16b, v19.16b // pix3 + pix3+1 8..15
    130 add v29.8h, v3.8h, v5.8h // add up 8..15, using pix2 + pix2+1 values from pix3 above
    137 add v31.8h, v5.8h, v3.8h // add up 8..15, using pix2 + pix2+1 values from pix3 above
    146 // v4, v5, v7, v18, v19, v24, v25, v27, v29, v31
    154 uabd v5 [all...]
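The uabal/uabal2 instructions accumulate absolute differences of unsigned bytes into 16-bit lanes, which is how these me_cmp SAD kernels sum |pix1[i] - pix2[i]| over a block. A scalar reference for one 16-byte row (illustrative only, not the FFmpeg function):

    #include <cstdint>
    #include <cstdlib>

    // Sum of absolute differences over one 16-byte row, the scalar counterpart
    // of one uabal/uabal2 pair in the NEON loop.
    static uint32_t sad_row16(const uint8_t* pix1, const uint8_t* pix2) {
        uint32_t sum = 0;
        for (int i = 0; i < 16; i++)
            sum += static_cast<uint32_t>(std::abs(pix1[i] - pix2[i]));
        return sum;
    }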
H A D | vc1dsp_neon.S |
    33 ld1 {v5.16b, v6.16b}, [x0], #32
    41 shl v5.8h, v5.8h, #2 // 8/2 * src[32]
    53 ssra v5.8h, v5.8h, #1 // 12/2 * src[32]
    59 add v4.8h, v1.8h, v5.8h // t1/2 = 12/2 * src[0] + 12/2 * src[32]
    60 sub v1.8h, v1.8h, v5.8h // t2/2 = 12/2 * src[0] - 12/2 * src[32]
    63 add v5.8h, v1.8h, v2.8h // t6/2 = t2/2 + t4/2
    80 ssra v5.8h, v4.8h, #1 // (t6 + t2) >> 1
    87 srshr v4.8h, v5 [all...]
H A D | vp9itxfm_16bpp_neon.S |
    389 mov v5.16b, v4.16b
    396 ld1 {v4.4s,v5.4s,v6.4s,v7.4s}, [x2]
    401 sshr v5.4s, v5.4s, #2
    406 \txfm1\()4_\bpp v4, v5, v6, v7
    410 transpose_4x4s v4, v5, v6, v7, v16, v17, v18, v19
    412 \txfm2\()4_\bpp v4, v5, v6, v7
    419 srshr v5.4s, v5.4s, #4
    424 uaddw v5 [all...]
H A D | h264cmc_neon.S |
    60 ld1 {v4.8B, v5.8B}, [x1], x2
    63 ext v5.8B, v4.8B, v5.8B, #1
    66 umlal v16.8H, v5.8B, v1.8B
    68 ld1 {v4.8B, v5.8B}, [x1], x2
    71 ext v5.8B, v4.8B, v5.8B, #1
    77 umlal v17.8H, v5.8B, v3.8B
    136 4: ld1 {v4.8B, v5.8B}, [x1], x2
    138 ext v5 [all...]
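The h264cmc pattern above (load a row, ext by #1 to get the same row shifted one pixel, then umlal with four weights) is H.264 chroma bilinear interpolation. A scalar sketch of one output pixel using the standard weights derived from the fractional motion vector (illustrative helper, not the FFmpeg routine):

    #include <cstdint>

    // One bilinearly interpolated chroma pixel: weights A..D come from the
    // fractional MV (dx, dy), with A+B+C+D == 64, hence the +32 and >>6 rounding.
    static inline uint8_t chroma_bilinear(const uint8_t* src, int stride, int dx, int dy) {
        const int A = (8 - dx) * (8 - dy);
        const int B = dx * (8 - dy);
        const int C = (8 - dx) * dy;
        const int D = dx * dy;
        const int v = A * src[0] + B * src[1] + C * src[stride] + D * src[stride + 1];
        return static_cast<uint8_t>((v + 32) >> 6);
    }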
H A D | vp8dsp_neon.S |
    37 sub v5.4h, v0.4h, v3.4h
    40 add v1.4h, v5.4h, v7.4h
    42 sub v3.4h, v5.4h, v7.4h
    46 transpose_4x4H v0, v1, v2, v3, v4, v5, v6, v7
    53 sub v5.4h, v0.4h, v3.4h
    55 add v1.4h, v5.4h, v7.4h
    57 sub v3.4h, v5.4h, v7.4h
    111 transpose_4x4H v0, v1, v2, v3, v24, v5, v6, v7
    144 transpose_4x4H v0, v1, v2, v3, v5, v6, v7, v16
    188 ld1 {v5 [all...]
H A D | aacpsdsp_neon.S |
    51 zip2 v5.4S, v0.4S, v0.4S
    57 fadd v5.4S, v5.4S, v7.4S
    61 fmla v2.4S, v3.4S, v5.4S
    108 ld2 {v4.2S,v5.2S}, [x1], #16
    115 rev64 v5.2S, v5.2S
    117 mov v4.D[1], v5.D[0]
    118 mov v5.D[1], v2.D[0]
    125 fsub v23.4S, v5 [all...]
H A D | vorbisdsp_neon.S |
    33 and v5.16b, v6.16b, v20.16b
    34 eor v7.16b, v7.16b, v5.16b
    42 and v5.16b, v0.16b, v20.16b
    43 eor v1.16b, v1.16b, v5.16b
    55 and v5.16b, v6.16b, v20.16b
    56 eor v7.16b, v7.16b, v5.16b
    72 and v5.16b, v0.16b, v20.16b
    73 eor v1.16b, v1.16b, v5.16b
/third_party/json/include/nlohmann/detail/ |
H A D | macro_scope.hpp |
    323 #define NLOHMANN_JSON_PASTE6(func, v1, v2, v3, v4, v5) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE5(func, v2, v3, v4, v5)
    324 #define NLOHMANN_JSON_PASTE7(func, v1, v2, v3, v4, v5, v6) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE6(func, v2, v3, v4, v5, v6)
    325 #define NLOHMANN_JSON_PASTE8(func, v1, v2, v3, v4, v5, v6, v7) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE7(func, v2, v3, v4, v5, v6, v7)
    326 #define NLOHMANN_JSON_PASTE9(func, v1, v2, v3, v4, v5, v6, v7, v8) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE8(func, v2, v3, v4, v5, v6, v7, v8)
    327 #define NLOHMANN_JSON_PASTE10(func, v1, v2, v3, v4, v5, v6, v7, v8, v9) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE9(func, v2, v3, v4, v5, v [all...]
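The NLOHMANN_JSON_PASTEn macros listed above peel one argument off per step: PASTEn(func, v1, ...) expands to PASTE2(func, v1) followed by PASTEn-1(func, v2, ...), and PASTE2(func, v1) is just func(v1), so func ends up applied to every argument in turn. A toy three-level version of the same pattern (illustrative, not the library's full macro set):

    #include <iostream>

    // Miniature version of the paste chain: APPLYn(f, a, b, ...) calls f on each argument.
    #define APPLY2(func, v1)           func(v1)
    #define APPLY3(func, v1, v2)       APPLY2(func, v1) APPLY2(func, v2)
    #define APPLY4(func, v1, v2, v3)   APPLY2(func, v1) APPLY3(func, v2, v3)

    #define PRINT_MEMBER(m) std::cout << #m << " = " << m << '\n';

    int main() {
        int x = 1, y = 2, z = 3;
        APPLY4(PRINT_MEMBER, x, y, z)   // expands to three PRINT_MEMBER(...) statements
    }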
/third_party/ffmpeg/libavresample/aarch64/ |
H A D | audio_convert_neon.S |
    30 fcvtzs v5.4s, v1.4s, #31
    38 sqrshrn2 v4.8h, v5.4s, #16
    47 fcvtzs v5.4s, v1.4s, #31
    56 sqrshrn2 v4.8h, v5.4s, #16
    64 sqrshrn2 v4.8h, v5.4s, #16
    75 fcvtzs v5.4s, v1.4s, #31
    92 sri v7.4s, v5.4s, #16
    102 fcvtzs v5.4s, v1.4s, #31
    118 sri v7.4s, v5.4s, #16
    127 sri v7.4s, v5 [all...]
/third_party/ffmpeg/libswresample/aarch64/ |
H A D | audio_convert_neon.S |
    31 fcvtzs v5.4s, v1.4s, #31
    39 sqrshrn2 v4.8h, v5.4s, #16
    48 fcvtzs v5.4s, v1.4s, #31
    57 sqrshrn2 v4.8h, v5.4s, #16
    65 sqrshrn2 v4.8h, v5.4s, #16
    77 fcvtzs v5.4s, v1.4s, #31
    94 sri v7.4s, v5.4s, #16
    104 fcvtzs v5.4s, v1.4s, #31
    120 sri v7.4s, v5.4s, #16
    129 sri v7.4s, v5 [all...]
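Both audio_convert_neon.S copies convert float samples to 16-bit integers: fcvtzs with #31 turns a float in roughly [-1, 1) into a Q31 fixed-point value, and sqrshrn/sqrshrn2 narrow that to int16 with rounding and saturation. A scalar sketch of the net effect (assumed scaling, for illustration only, not the library routine):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // float (roughly -1.0..1.0) -> int16_t with rounding and saturation,
    // mirroring fcvtzs #31 followed by a saturating rounding narrow by 16 bits.
    static inline int16_t float_to_s16(float sample) {
        const float scaled  = sample * 32768.0f;
        const long  rounded = std::lrintf(scaled);
        return static_cast<int16_t>(std::clamp<long>(rounded, -32768, 32767));
    }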
/third_party/googletest/googletest/include/gtest/ |
H A D | gtest_pred_impl.h |
    240 const T4& v4, const T5& v5) { in AssertPred5Helper()
    241 if (pred(v1, v2, v3, v4, v5)) return AssertionSuccess(); in AssertPred5Helper()
    251 << e5 << " evaluates to " << ::testing::PrintToString(v5); in AssertPred5Helper()
    256 #define GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, on_failure) \
    257 GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, #v5, v1, v2, v3, v4, v5), \
    262 #define GTEST_PRED5_(pred, v1, v2, v3, v4, v5, on_failure) \
    263 GTEST_ASSERT_(::testing::AssertPred5Helper(#pred, #v1, #v2, #v3, #v4, #v5, \
    264 pred, v1, v2, v3, v4, v5), \
    268 #define EXPECT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \
    236 AssertPred5Helper(const char* pred_text, const char* e1, const char* e2, const char* e3, const char* e4, const char* e5, Pred pred, const T1& v1, const T2& v2, const T3& v3, const T4& v4, const T5& v5) AssertPred5Helper() argument [all...]
/third_party/node/deps/googletest/include/gtest/ |
H A D | gtest_pred_impl.h |
    240 const T4& v4, const T5& v5) { in AssertPred5Helper()
    241 if (pred(v1, v2, v3, v4, v5)) return AssertionSuccess(); in AssertPred5Helper()
    251 << e5 << " evaluates to " << ::testing::PrintToString(v5); in AssertPred5Helper()
    256 #define GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, on_failure) \
    257 GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, #v5, v1, v2, v3, v4, v5), \
    262 #define GTEST_PRED5_(pred, v1, v2, v3, v4, v5, on_failure) \
    263 GTEST_ASSERT_(::testing::AssertPred5Helper(#pred, #v1, #v2, #v3, #v4, #v5, \
    264 pred, v1, v2, v3, v4, v5), \
    268 #define EXPECT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \
    236 AssertPred5Helper(const char* pred_text, const char* e1, const char* e2, const char* e3, const char* e4, const char* e5, Pred pred, const T1& v1, const T2& v2, const T3& v3, const T4& v4, const T5& v5) AssertPred5Helper() argument [all...]
/third_party/mesa3d/src/gtest/include/gtest/ |
H A D | gtest_pred_impl.h |
    309 const T5& v5) { in AssertPred5Helper()
    310 if (pred(v1, v2, v3, v4, v5)) return AssertionSuccess(); in AssertPred5Helper()
    320 << e5 << " evaluates to " << ::testing::PrintToString(v5); in AssertPred5Helper()
    325 #define GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, on_failure)\
    326 GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, #v5, v1, v2, v3, v4, v5), \
    331 #define GTEST_PRED5_(pred, v1, v2, v3, v4, v5, on_failure)\
    337 #v5, \
    343 v5), on_failure)
    346 #define EXPECT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \
    298 AssertPred5Helper(const char* pred_text, const char* e1, const char* e2, const char* e3, const char* e4, const char* e5, Pred pred, const T1& v1, const T2& v2, const T3& v3, const T4& v4, const T5& v5) AssertPred5Helper() argument [all...]
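The gtest_pred_impl.h hits in the three vendored copies above are the plumbing behind googletest's 5-ary predicate assertions: GTEST_PRED5_ stringizes the predicate and its five arguments and routes them through AssertPred5Helper, which prints each argument's name and value on failure. Typical use of the public macro looks like this:

    #include <gtest/gtest.h>

    // A 5-ary predicate: true when the five values are strictly increasing.
    static bool StrictlyIncreasing(int a, int b, int c, int d, int e) {
        return a < b && b < c && c < d && d < e;
    }

    TEST(PredFormatDemo, FiveArgumentPredicate) {
        // On failure, AssertPred5Helper reports each argument by name and value.
        EXPECT_PRED5(StrictlyIncreasing, 1, 2, 3, 5, 8);
    }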
/third_party/node/deps/openssl/config/archs/linux-aarch64/asm/crypto/modes/ |
H A D | ghashv8-armx.S |
    55 pmull v5.1q,v22.1d,v22.1d
    62 ext v17.16b,v5.16b,v7.16b,#8
    65 eor v4.16b,v5.16b,v7.16b
    70 pmull v4.1q,v5.1d,v19.1d
    75 ins v6.d[1],v5.d[0]
    77 eor v5.16b,v6.16b,v4.16b
    80 ext v4.16b,v5.16b,v5.16b,#8
    82 pmull v5.1q,v5 [all...]
/third_party/node/deps/openssl/config/archs/darwin64-arm64-cc/asm/crypto/modes/ |
H A D | ghashv8-armx.S |
    55 pmull v5.1q,v22.1d,v22.1d
    62 ext v17.16b,v5.16b,v7.16b,#8
    65 eor v4.16b,v5.16b,v7.16b
    70 pmull v4.1q,v5.1d,v19.1d
    75 ins v6.d[1],v5.d[0]
    77 eor v5.16b,v6.16b,v4.16b
    80 ext v4.16b,v5.16b,v5.16b,#8
    82 pmull v5.1q,v5 [all...]
/third_party/node/deps/openssl/config/archs/darwin64-arm64-cc/asm_avx2/crypto/modes/ |
H A D | ghashv8-armx.S |
    55 pmull v5.1q,v22.1d,v22.1d
    62 ext v17.16b,v5.16b,v7.16b,#8
    65 eor v4.16b,v5.16b,v7.16b
    70 pmull v4.1q,v5.1d,v19.1d
    75 ins v6.d[1],v5.d[0]
    77 eor v5.16b,v6.16b,v4.16b
    80 ext v4.16b,v5.16b,v5.16b,#8
    82 pmull v5.1q,v5 [all...]
/third_party/node/deps/openssl/config/archs/linux-aarch64/asm_avx2/crypto/modes/ |
H A D | ghashv8-armx.S |
    55 pmull v5.1q,v22.1d,v22.1d
    62 ext v17.16b,v5.16b,v7.16b,#8
    65 eor v4.16b,v5.16b,v7.16b
    70 pmull v4.1q,v5.1d,v19.1d
    75 ins v6.d[1],v5.d[0]
    77 eor v5.16b,v6.16b,v4.16b
    80 ext v4.16b,v5.16b,v5.16b,#8
    82 pmull v5.1q,v5 [all...]
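All four ghashv8-armx.S copies implement GHASH with the pmull/pmull2 carry-less multiply: each instruction multiplies two 64-bit polynomials over GF(2) into a 128-bit product, which the surrounding ext/eor sequence then folds modulo the GHASH polynomial. A plain C++ sketch of the carry-less multiply itself (the reduction step is omitted):

    #include <cstdint>
    #include <utility>

    // Carry-less (polynomial) multiply of two 64-bit values, the operation a
    // single pmull performs on one vector lane. Returns {high 64, low 64} bits.
    static std::pair<uint64_t, uint64_t> clmul64(uint64_t a, uint64_t b) {
        uint64_t lo = 0, hi = 0;
        for (int i = 0; i < 64; i++) {
            if ((b >> i) & 1) {
                lo ^= a << i;
                if (i != 0)
                    hi ^= a >> (64 - i);   // bits shifted out of the low word
            }
        }
        return {hi, lo};
    }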
/third_party/ffmpeg/libswscale/aarch64/ |
H A D | hscale.S |
    63 ld1 {v5.8H}, [x16], #16 // load 8x16-bit filter values, part 1
    67 smlal v0.4S, v4.4H, v5.4H // v0 accumulates srcp[filterPos[0] + {0..3}] * filter[{0..3}]
    68 smlal2 v0.4S, v4.8H, v5.8H // v0 accumulates srcp[filterPos[0] + {4..7}] * filter[{4..7}]
    106 // v5 madd accumulator 4S
    150 movi v5.2D, #0 // Clear madd accumulator for idx 4..7
    175 smlal2 v5.4S, v1.8H, v16.8H // multiply accumulate inner loop j = 0, idx = 4..7
    176 smlal2 v5.4S, v2.8H, v17.8H // multiply accumulate inner loop j = 1, idx = 4..7
    179 smlal2 v5.4S, v3.8H, v18.8H // multiply accumulate inner loop j = 2, idx = 4..7
    180 smlal2 v5.4S, v4.8H, v19.8H // multiply accumulate inner loop j = 3, idx = 4..7
    186 sqshrn v1.4H, v5 [all...]
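The hscale.S comments spell out what the smlal/smlal2 chains compute: each output sample is a dot product of the source pixels starting at filterPos[i] with 16-bit filter coefficients, accumulated in 32 bits and narrowed with a saturating shift (sqshrn). A scalar reference of that inner loop (assumed signature and an assumed >>7 scaling, for illustration only):

    #include <algorithm>
    #include <cstdint>

    // Scalar equivalent of the NEON multiply-accumulate loop: one destination
    // sample is sum(src[filterPos[i] + j] * filter[i*filterSize + j]), scaled
    // and clipped to the signed 16-bit range that sqshrn saturates to.
    static void hscale_scalar(int16_t* dst, int dstW,
                              const uint8_t* src, const int16_t* filter,
                              const int32_t* filterPos, int filterSize) {
        for (int i = 0; i < dstW; i++) {
            int32_t acc = 0;
            for (int j = 0; j < filterSize; j++)
                acc += src[filterPos[i] + j] * filter[i * filterSize + j];
            dst[i] = static_cast<int16_t>(std::clamp(acc >> 7, -32768, 32767));
        }
    }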
/third_party/mesa3d/src/gallium/drivers/llvmpipe/ |
H A D | lp_setup_rect.c |
    492 const float (*v5)[4], in do_rect_ccw()
    508 * v5 v4/v2 in do_rect_ccw()
    516 rv0 = v5; in do_rect_ccw()
    520 } else if (SAME_POS(v1, v5)) { in do_rect_ccw()
    528 * v5/v1 v2 in do_rect_ccw()
    537 } else if (SAME_POS(v0, v5)) { in do_rect_ccw()
    546 * v5/v0 v1 in do_rect_ccw()
    554 * v3 v5/v0 in do_rect_ccw()
    570 if (SAME_POS(v2, v5)) { in do_rect_ccw()
    572 * v3 v5/v in do_rect_ccw()
    486 do_rect_ccw(struct lp_setup_context *setup, const float (*v0)[4], const float (*v1)[4], const float (*v2)[4], const float (*v3)[4], const float (*v4)[4], const float (*v5)[4], boolean front) do_rect_ccw() argument
    753 setup_rect_cw(struct lp_setup_context *setup, const float (*v0)[4], const float (*v1)[4], const float (*v2)[4], const float (*v3)[4], const float (*v4)[4], const float (*v5)[4]) setup_rect_cw() argument
    780 setup_rect_ccw(struct lp_setup_context *setup, const float (*v0)[4], const float (*v1)[4], const float (*v2)[4], const float (*v3)[4], const float (*v4)[4], const float (*v5)[4]) setup_rect_ccw() argument
    808 setup_rect_noop(struct lp_setup_context *setup, const float (*v0)[4], const float (*v1)[4], const float (*v2)[4], const float (*v3)[4], const float (*v4)[4], const float (*v5)[4]) setup_rect_noop() argument
    821 setup_rect_both(struct lp_setup_context *setup, const float (*v0)[4], const float (*v1)[4], const float (*v2)[4], const float (*v3)[4], const float (*v4)[4], const float (*v5)[4]) setup_rect_both() argument [all...]
/third_party/vixl/test/aarch64/ |
H A D | test-disasm-neon-aarch64.cc |
    352 COMPARE_MACRO(Ld1(v3.M, v4.M, v5.M, MemOperand(x17)), \ in TEST()
    353 "ld1 {v3." S ", v4." S ", v5." S "}, [x17]"); \ in TEST()
    360 COMPARE_MACRO(Ld3(v3.M, v4.M, v5.M, MemOperand(x17)), \ in TEST()
    361 "ld3 {v3." S ", v4." S ", v5." S "}, [x17]"); \ in TEST()
    374 COMPARE_MACRO(Ld1(v3.M, v4.M, v5.M, MemOperand(x17, x22, PostIndex)), \ in TEST()
    375 "ld1 {v3." S ", v4." S ", v5." S "}, [x17], x22"); \ in TEST()
    382 COMPARE_MACRO(Ld3(v3.M, v4.M, v5.M, MemOperand(x17, x22, PostIndex)), \ in TEST()
    383 "ld3 {v3." S ", v4." S ", v5." S "}, [x17], x22"); \ in TEST()
    397 COMPARE_MACRO(Ld1(v4.V8H(), v5.V8H(), MemOperand(x18, 32, PostIndex)), in TEST()
    398 "ld1 {v4.8h, v5 in TEST() [all...]