
Search results for refs:v24 (results 1 - 25 of 97), sorted by relevance


/third_party/node/deps/openssl/config/archs/darwin64-arm64-cc/asm_avx2/crypto/aes/
aesv8-armx.S
404 orr v24.16b,v1.16b,v1.16b
409 ld1 {v24.16b},[x0],#16
423 aese v24.16b,v16.16b
424 aesmc v24.16b,v24.16b
435 aese v24.16b,v17.16b
436 aesmc v24.16b,v24.16b
448 aese v24.16b,v16.16b
449 aesmc v24
[all...]
/third_party/node/deps/openssl/config/archs/linux-aarch64/asm/crypto/aes/
aesv8-armx.S
404 orr v24.16b,v1.16b,v1.16b
409 ld1 {v24.16b},[x0],#16
423 aese v24.16b,v16.16b
424 aesmc v24.16b,v24.16b
435 aese v24.16b,v17.16b
436 aesmc v24.16b,v24.16b
448 aese v24.16b,v16.16b
449 aesmc v24
[all...]
/third_party/node/deps/openssl/config/archs/linux-aarch64/asm_avx2/crypto/aes/
aesv8-armx.S
404 orr v24.16b,v1.16b,v1.16b
409 ld1 {v24.16b},[x0],#16
423 aese v24.16b,v16.16b
424 aesmc v24.16b,v24.16b
435 aese v24.16b,v17.16b
436 aesmc v24.16b,v24.16b
448 aese v24.16b,v16.16b
449 aesmc v24
[all...]
/third_party/node/deps/openssl/config/archs/darwin64-arm64-cc/asm/crypto/aes/
aesv8-armx.S
404 orr v24.16b,v1.16b,v1.16b
409 ld1 {v24.16b},[x0],#16
423 aese v24.16b,v16.16b
424 aesmc v24.16b,v24.16b
435 aese v24.16b,v17.16b
436 aesmc v24.16b,v24.16b
448 aese v24.16b,v16.16b
449 aesmc v24
[all...]
/third_party/ffmpeg/libavfilter/aarch64/
vf_nlmeans_neon.S
25 dup v24.4S, v24.S[3] // ...X -> XXXX
27 add v24.4S, v24.4S, \x // XXXX+ABCD={X+A,X+B,X+C,X+D}
28 add v24.4S, v24.4S, v25.4S // {X+A,X+B+A,X+C+B,X+D+C} (+0ABC)
30 add v24.4S, v24.4S, v25.4S // {X+A,X+B+A,X+C+B+A,X+D+C+B} (+00AB)
32 add v24.4S, v24
[all...]
/third_party/node/deps/openssl/config/archs/darwin64-arm64-cc/asm/crypto/sha/
sha512-armv8.S
1114 ld1 {v24.2d},[x3],#16
1122 add v24.2d,v24.2d,v16.2d
1124 ext v24.16b,v24.16b,v24.16b,#8
1127 add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]"
1135 ld1 {v24.2d},[x3],#16
1146 add v24.2d,v24
[all...]
/third_party/node/deps/openssl/config/archs/linux-aarch64/asm_avx2/crypto/sha/
sha512-armv8.S
1114 ld1 {v24.2d},[x3],#16
1122 add v24.2d,v24.2d,v16.2d
1124 ext v24.16b,v24.16b,v24.16b,#8
1127 add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]"
1135 ld1 {v24.2d},[x3],#16
1146 add v24.2d,v24
[all...]
/third_party/node/deps/openssl/config/archs/linux-aarch64/asm/crypto/sha/
sha512-armv8.S
1114 ld1 {v24.2d},[x3],#16
1122 add v24.2d,v24.2d,v16.2d
1124 ext v24.16b,v24.16b,v24.16b,#8
1127 add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]"
1135 ld1 {v24.2d},[x3],#16
1146 add v24.2d,v24
[all...]
/third_party/node/deps/openssl/config/archs/darwin64-arm64-cc/asm_avx2/crypto/sha/
sha512-armv8.S
1114 ld1 {v24.2d},[x3],#16
1122 add v24.2d,v24.2d,v16.2d
1124 ext v24.16b,v24.16b,v24.16b,#8
1127 add v3.2d,v3.2d,v24.2d // "T1 + H + K512[i]"
1135 ld1 {v24.2d},[x3],#16
1146 add v24.2d,v24
[all...]
/third_party/json/include/nlohmann/detail/
macro_scope.hpp
342 #define NLOHMANN_JSON_PASTE25(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE24(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24)
343 #define NLOHMANN_JSON_PASTE26(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE25(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25)
344 #define NLOHMANN_JSON_PASTE27(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE26(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26)
345 #define NLOHMANN_JSON_PASTE28(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE27(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27)
346 #define NLOHMANN_JSON_PASTE29(func, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28) NLOHMANN_JSON_PASTE2(func, v1) NLOHMANN_JSON_PASTE28(func, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v2
[all...]
/third_party/ffmpeg/libavcodec/aarch64/
vp9mc_16bpp_neon.S
186 smull v24.4s, v16.4h, v0.h[0]
197 extmlal v1, v2, v3, v4, v24, v25, v26, v27, v5, v6, v7, v16, v17, v18, 1, \size
198 extmlal v1, v2, v3, v4, v24, v25, v26, v27, v5, v6, v7, v16, v17, v18, 2, \size
199 extmlal v1, v2, v3, v4, v24, v25, v26, v27, v5, v6, v7, v16, v17, v18, 3, \size
200 extmlal v1, v2, v3, v4, v24, v25, v26, v27, v5, v6, v7, v16, v17, v18, 4, \size
201 extmlal v1, v2, v3, v4, v24, v25, v26, v27, v5, v6, v7, v16, v17, v18, 5, \size
202 extmlal v1, v2, v3, v4, v24, v25, v26, v27, v5, v6, v7, v16, v17, v18, 6, \size
203 extmlal v1, v2, v3, v4, v24, v25, v26, v27, v5, v6, v7, v16, v17, v18, 7, \size
209 sqrshrun v24.4h, v24
[all...]
h264dsp_neon.S
30 mov v24.S[0], w6
42 uxtl v24.8H, v24.8B
44 uxtl v24.4S, v24.4H
46 sli v24.8H, v24.8H, #8
48 sli v24.4S, v24.4S, #16
51 cmlt v23.16B, v24
[all...]
fft_neon.S
81 fmul v24.2s, v17.2s, v28.s[1] // a2r*w,a2i*w
90 fadd v24.2s, v24.2s, v26.2s // a2r+a2i,a2i-a2r t1,t2
94 ext v26.8b, v24.8b, v25.8b, #4
95 ext v27.8b, v25.8b, v24.8b, #4
98 fadd v5.2s, v25.2s, v24.2s
132 fmul v24.2s, v17.2s, v28.s[1] // a2r*w,a2i*w
141 fadd v24.2s, v24.2s, v26.2s // a2r+a2i,a2i-a2r t1,t2
147 ext v26.8b, v24
[all...]
vp9lpf_neon.S
154 // p7 = v16 .. p3 = v20, p0 = v23, q0 = v24, q3 = v27, q7 = v31
179 uabd v7\sz, v24\sz, v25\sz // abs(q0 - q1)
185 uabd v6\sz, v23\sz, v24\sz // abs(p0 - q0)
212 uabd \tmp1\sz, v25\sz, v24\sz // abs(q1 - q0)
213 uabd \tmp2\sz, v26\sz, v24\sz // abs(q2 - q0)
214 uabd \tmp3\sz, v27\sz, v24\sz // abs(q3 - q0)
227 uabd v9\sz, v28\sz, v24\sz // abs(q4 - q0)
229 uabd v10\sz, v29\sz, v24\sz // abs(q5 - q0)
230 uabd v11\sz, v30\sz, v24\sz // abs(q6 - q0)
231 uabd v12\sz, v31\sz, v24\s
[all...]
vp9lpf_16bpp_neon.S
27 // p7 = v16 .. p3 = v20, p0 = v23, q0 = v24, q3 = v27, q7 = v31
38 uabd v7.8h, v24.8h, v25.8h // abs(q0 - q1)
44 uabd v6.8h, v23.8h, v24.8h // abs(p0 - q0)
69 uabd \tmp1\().8h, v25.8h, v24.8h // abs(q1 - q0)
70 uabd \tmp2\().8h, v26.8h, v24.8h // abs(q2 - q0)
71 uabd \tmp3\().8h, v27.8h, v24.8h // abs(q3 - q0)
84 uabd v9.8h, v28.8h, v24.8h // abs(q4 - q0)
86 uabd v10.8h, v29.8h, v24.8h // abs(q5 - q0)
87 uabd v11.8h, v30.8h, v24.8h // abs(q6 - q0)
88 uabd v12.8h, v31.8h, v24
[all...]
vp9mc_neon.S
270 mul v24.8h, v16.8h, v0.h[0]
275 extmla v1, v2, v24, v25, v4, v5, v6, v16, v17, v18, 1, \size
276 extmla v1, v2, v24, v25, v4, v5, v6, v16, v17, v18, 2, \size
277 extmla v1, v2, v24, v25, v4, v5, v6, v16, v17, v18, \idx1, \size
278 extmla v1, v2, v24, v25, v4, v5, v6, v16, v17, v18, 5, \size
279 extmla v1, v2, v24, v25, v4, v5, v6, v16, v17, v18, 6, \size
280 extmla v1, v2, v24, v25, v4, v5, v6, v16, v17, v18, 7, \size
281 extmulqadd v1, v2, v24, v25, v4, v5, v6, v16, v17, v18, \idx2, \size
285 sqrshrun v24.8b, v24
[all...]
h264idct_neon.S
200 add v16.8H, v24.8H, v28.8H
204 sub v17.8H, v24.8H, v28.8H
213 add v16.8H, v24.8H, v28.8H
214 sub v17.8H, v24.8H, v28.8H
220 add v24.8H, v16.8H, v19.8H
247 sub v31.8H, v24.8H, v19.8H
248 add v24.8H, v24.8H, v19.8H
256 sub v31.8H, v24.8H, v19.8H
257 add v24
[all...]
vp9itxfm_neon.S
333 butterfly_8h v24, v25, v16, v22 // v24 = t0, v25 = t3
340 butterfly_8h v16, v23, v24, v30 // v16 = out[0], v23 = out[7]
347 dmbutterfly_l v24, v25, v26, v27, v23, v16, v1.h[1], v1.h[0] // v24,v25 = t1a, v26,v27 = t0a
353 dbutterfly_n v2, v3, v24, v25, v2, v3, v6, v7, v26, v27 // v2 = t1, v3 = t5
354 dbutterfly_n v24, v25, v30, v31, v21, v23, v6, v7, v26, v27 // v24 = t2, v25 = t6
357 butterfly_8h v16, v6, v4, v24 // v16 = out[0], v6 = t2
361 dmbutterfly0 v19, v20, v6, v7, v24, v2
[all...]
simple_idct_neon.S
160 add v23.4H, v23.4H, v24.4H
162 mov v5.D[0], v24.D[1]
167 idct_col4_top v24, v25, v26, v27, \i, \l
228 idct_row4_neon v24, v25, v26, v27, 1
266 idct_row4_neon v24, v25, v26, v27, 1
285 zip2 v24.2D, v1.2D, v7.2D
297 uaddw2 v24.8H, v24.8H, v19.16B
300 sqxtun2 v23.16B, v24.8H
302 uaddw v24
[all...]
vp9itxfm_16bpp_neon.S
254 mul v24.4s, v17.4s, v0.s[0]
258 srshr v24.4s, v24.4s, #14
262 add \c1\().4s, v24.4s, v20.4s
263 sub \c2\().4s, v24.4s, v20.4s
277 smull v24.2d, v17.2s, v0.s[0]
285 rshrn v24.2s, v24.2d, #14
286 rshrn2 v24.4s, v25.2d, #14
291 add \c1\().4s, v24
[all...]
hpeldsp_neon.S
122 add v24.8H, v16.8H, v18.8H
123 NRND add v24.8H, v24.8H, v26.8H
126 mshrn v28.8B, v24.8H, #2
137 add v24.8H, v16.8H, v18.8H
138 NRND add v24.8H, v24.8H, v26.8H
141 mshrn v30.8B, v24.8H, #2
154 add v24.8H, v16.8H, v18.8H
155 NRND add v24
[all...]
mdct_neon.S
102 fmul v24.2s, v0.2s, v16.2s
106 fsub v4.2s, v4.2s, v24.2s
217 fmul v24.2s, v16.2s, v30.2s // R*c
225 fsub v24.2s, v25.2s, v24.2s // I*s-R*c
254 st2 {v24.s,v25.s}[0], [x10]
255 st2 {v24.s,v25.s}[1], [x6]
269 st2 {v24.s,v25.s}[0], [x10]
270 st2 {v24.s,v25.s}[1], [x6]
301 fmul v24
[all...]
/third_party/node/deps/openssl/openssl/crypto/aes/asm/
aesp8-ppc.pl
680 my $rndkey0="v23"; # v24-v25 rotating buffer for first found keys
698 stvx v24,r10,$sp
741 ?vperm v24,v30,v31,$keyperm
744 stvx v24,$x00,$key_ # off-load round[1]
752 ?vperm v24,v30,v31,$keyperm
754 stvx v24,$x00,$key_ # off-load round[3]
768 lvx v24,$x00,$key_ # pre-load round[1]
810 vncipher $out0,$out0,v24
811 vncipher $out1,$out1,v24
812 vncipher $out2,$out2,v24
[all...]
/third_party/openssl/crypto/aes/asm/
aesp8-ppc.pl
680 my $rndkey0="v23"; # v24-v25 rotating buffer for first found keys
698 stvx v24,r10,$sp
741 ?vperm v24,v30,v31,$keyperm
744 stvx v24,$x00,$key_ # off-load round[1]
752 ?vperm v24,v30,v31,$keyperm
754 stvx v24,$x00,$key_ # off-load round[3]
768 lvx v24,$x00,$key_ # pre-load round[1]
810 vncipher $out0,$out0,v24
811 vncipher $out1,$out1,v24
812 vncipher $out2,$out2,v24
[all...]
/third_party/ffmpeg/libswscale/aarch64/
yuv2rgb_neon.S
130 add v24.8H, v26.8H, v24.8H // Y1 + B1
136 sqrshrun \b1, v24.8H, #1 // clip_u8((Y1 + B1) >> 1)
157 sqdmulh v24.8H, v18.8H, v1.H[3] // U * u2b (B)
162 zip2 v25.8H, v24.8H, v24.8H // B2
163 zip1 v24.8H, v24.8H, v24.8H // B1
