Lines matching refs:z12

312 __ Index(z12.VnB(), 0, -1);
363 ASSERT_EQUAL_SVE_LANE(0x00, z12.VnB(), i);
404 ZRegister mla_dm_result = z12.WithLaneSize(lane_size_in_bits);
862 __ Clasta(z12.VnH(), p4, z12.VnH(), z0.VnH());
906 ASSERT_EQUAL_SVE(z12_expected, z12.VnD());
1148 InsrHelper(&masm, z12.VnD(), z12_inputs);
1152 __ Cmphi(p7.VnD(), p1.Zeroing(), z12.VnD(), z13.VnD());
1184 __ Cmplo(p13.VnD(), p1.Zeroing(), z13.VnD(), z12.VnD()); // HI
1375 InsrHelper(&masm, z12.VnB(), z24_inputs);
1380 __ Orr(z12.VnB(), z12.VnB(), 0x3f);
1432 ASSERT_EQUAL_SVE(z12_expected, z12.VnB());
1467 __ Dup(z12.VnH(), 0x0001);
1495 ASSERT_EQUAL_SVE(0x0001, z12.VnH());
1808 __ Decp(z12.VnS(), p0, z2.VnS());
1871 ASSERT_EQUAL_SVE(z2_expected, z12.VnS());
1956 __ Sqdecp(z12.VnS(), p0, z2.VnS());
2019 ASSERT_EQUAL_SVE(z2_expected, z12.VnS());
2104 __ Uqdecp(z12.VnS(), p0, z2.VnS());
2175 ASSERT_EQUAL_SVE(z2_expected, z12.VnS());
2261 __ Index(z12.VnB(), x0, x1);
2319 ASSERT_EQUAL_SVE_LANE((42 - (3 * i)) & b_mask, z12.VnB(), i);
4415 ASSERT_EQUAL_SVE(z12, z28);
5626 __ Dup(z12.VnS(), z9.VnS(), index[2]);
5700 ASSERT_EQUAL_SVE_LANE(expected_z12, z12.VnS(), i);
5748 __ Sunpkhi(z12.VnD(), z9.VnS());
5792 uint64_t expected = core.zreg_lane<uint64_t>(z12.GetCode(), i);
6655 __ Fcpy(z12.VnD(), pg.Merging(), RawbitsToDouble(0x7ff0000012340000)); // NaN
6739 ASSERT_EQUAL_SVE(expected_z12, z12.VnD());
6749 ASSERT_EQUAL_SVE(z12.VnD(), z16.VnD());
6881 __ Ldr(z12, SVEMemOperand(x0, 2, SVE_MUL_VL));
6915 ASSERT_EQUAL_SVE(z2, z12);
7090 __ Ld1d(z12.VnD(), p5.Zeroing(), SVEMemOperand(x3, x4, LSL, 3));
7120 // Ld1d(z12.VnD(), ...)
7221 ASSERT_EQUAL_SVE(z22, z12);
7427 __ Index(z12.VnH(), 6, -2);
7431 __ St2h(z12.VnH(), z13.VnH(), p6, SVEMemOperand(x0, x2, LSL, 1));
7472 __ Mov(z6.VnH(), p6.Merging(), z12.VnH());
7521 // st2h { z12.h, z13.h }, SVE_VL16
7606 __ Index(z12.VnB(), 3, -3);
7608 __ St3b(z10.VnB(), z11.VnB(), z12.VnB(), p0, SVEMemOperand(x0));
7615 __ Mov(z6.VnB(), p0.Merging(), z12.VnB());
7643 __ Dup(z12.VnS(), 0);
7646 __ Mov(z12.VnS(), p2.Merging(), z0.VnS());
7709 // st3b { z10.b, z11.b, z12.b }, SVE_ALL
7773 ASSERT_EQUAL_SVE(z12, z24);
7812 __ Index(z12.VnB(), -6, 11);
7815 __ St3b(z10.VnB(), z11.VnB(), z12.VnB(), p7, SVEMemOperand(x0, x1, LSL, 0));
7822 __ Mov(z6.VnB(), p7.Merging(), z12.VnB());
7855 __ Dup(z12.VnS(), 0);
7858 __ Mov(z12.VnS(), p5.Merging(), z0.VnS());
7919 // st3b { z10.b, z11.b, z12.b }, SVE_MUL4
7986 ASSERT_EQUAL_SVE(z12, z24);
8027 __ Index(z12.VnB(), 3, -7);
8030 __ St4b(z10.VnB(), z11.VnB(), z12.VnB(), z13.VnB(), p0, SVEMemOperand(x0));
8038 __ Mov(z5.VnB(), p0.Merging(), z12.VnB());
8077 __ Dup(z12.VnS(), 0);
8081 __ Mov(z12.VnS(), p2.Merging(), z30.VnS());
8161 // st4b { z10.b, z11.b, z12.b, z13.b }, SVE_ALL
8234 ASSERT_EQUAL_SVE(z12, z28);
8341 __ Dup(z12.VnS(), 0);
8345 __ Mov(z12.VnS(), p5.Merging(), z30.VnS());
8495 ASSERT_EQUAL_SVE(z12, z28);
8552 __ Dup(z12.VnB(), 0);
8554 __ Mov(z12.VnB(), p0.Merging(), z0.VnB());
8598 ASSERT_EQUAL_SVE(z0, z12);
8648 __ Dup(z12.VnB(), 0);
8650 __ Mov(z12.VnB(), p0.Merging(), z0.VnB());
9503 __ Ldff1sb(z12.VnD(), all.Zeroing(), SVEMemOperand(x2, z29.VnD(), UXTW));
9592 ASSERT_EQUAL_SVE(expected_z12, z12.VnD());
9697 __ Ld1sb(z12.VnD(), all.Zeroing(), SVEMemOperand(x2, z29.VnD(), UXTW));
9772 ASSERT_EQUAL_SVE(expected_z12, z12.VnD());
10375 __ Ld1w(z12.VnS(), p3.Zeroing(), SVEMemOperand(x0, 7, SVE_MUL_VL));
10390 ASSERT_EQUAL_SVE(z12, z13);
10659 ZRegister zn_ld_s = z12.WithLaneSize(esize_in_bits);
11279 __ Fdup(z12.VnS(), 255.0f);
11301 ASSERT_EQUAL_SVE(FloatToRawbits(255.0), z12.VnS());
12060 __ Dup(z12.VnD(), 0);
12066 __ Sdot(z12.VnD(), z12.VnD(), z1.VnH(), z10.VnH(), 0);
12085 ASSERT_EQUAL_SVE(sdot_expected, z12.VnD());
12086 ASSERT_EQUAL_SVE(z12.VnD(), z13.VnD());
13011 __ Lsl(z12.VnD(), p0.Merging(), z0.VnD(), z1.VnD());
13045 ASSERT_EQUAL_SVE(expected_z12, z12.VnD());
13202 __ Asrd(z12.VnD(), p0.Merging(), z31.VnD(), 1);
13236 ASSERT_EQUAL_SVE(expected_z12, z12.VnD());
14461 __ Uzp1(z12.VnH(), z4.VnH(), z5.VnH());
14492 ASSERT_EQUAL_SVE(z0.VnD(), z12.VnD());
14563 __ Mov(z12, z11);
14564 __ Ext(z12.VnB(), z12.VnB(), z12.VnB(), 4);
14565 __ Sel(z12.VnS(), p2, z12.VnS(), z30.VnS());
14573 __ Fcadd(z11.VnS(), p0.Merging(), z11.VnS(), z12.VnS(), 270);
14587 __ Fcadd(z12.VnD(), p0.Merging(), z0.VnD(), z2.VnD(), 90);
14588 __ Fcadd(z12.VnD(), p0.Merging(), z12.VnD(), z28.VnD(), 270);
14605 ASSERT_EQUAL_SVE(z14.VnD(), z12.VnD());
14838 __ Dup(z12.VnH(), z25.VnH(), 0);
14839 FPSegmentPatternHelper(&masm, z12.VnH(), p0.Merging(), z12.VnH());
14840 __ Fmul(z12.VnH(), z1.VnH(), z12.VnH());
14875 ASSERT_EQUAL_SVE(z12.VnH(), z2.VnH());
14931 __ Mov(z12, z4);
14932 __ Ftmad(z12.VnD(), z12.VnD(), z5.VnD(), 0);
14969 ASSERT_EQUAL_SVE(expected_z12, z12.VnD());
15177 __ Mov(z12, z0);
15178 __ Fadd(z12.VnS(), p0m, z12.VnS(), 0.5);
15179 __ Mov(z13, z12);
15242 ASSERT_EQUAL_SVE(expected_z12, z12.VnD());
16687 ZRegister dm_result = z12.WithLaneSize(lane_size_in_bits);
16983 __ Mov(z12, z1);
16984 (masm.*macro_idx)(z12.VnD(), z2.VnD(), z12.VnD(), z0.VnD(), 1); // zd == zn
17049 ASSERT_EQUAL_SVE(z24.VnD(), z12.VnD());
17793 ZRegister zt_fp_2 = z12.WithLaneSize(lane_size_in_bits);
18065 __ Index(z12.WithLaneSize(ds), 42, -1);
18066 (masm.*macro_z)(z12.WithLaneSize(ds), p1.Zeroing(), z28.WithLaneSize(ss));
18093 ASSERT_EQUAL_SVE(z22.WithLaneSize(ls), z12.WithLaneSize(ls));
18444 __ Adr(z12.VnD(), SVEMemOperand(z0.VnD(), z2.VnD(), SXTW, 1));
18482 ASSERT_EQUAL_SVE(expected_z12, z12.VnD());
18561 ZRegister zn_temp = z12.WithLaneSize(esize_in_bits);
18634 __ Prfb(PLDL2STRM, p6, SVEMemOperand(x7, z12.VnS(), UXTW));
18737 __ Index(z12.VnH(), 0, 1);
18739 __ Saba(z13.VnH(), z13.VnH(), z12.VnH(), z13.VnH());
18760 ASSERT_EQUAL_SVE(z12, z13);
18971 __ Ldnt1sw(z12.VnD(), p3.Zeroing(), SVEMemOperand(z30.VnD(), x1));
18984 ASSERT_EQUAL_SVE(z12, z13);
19220 __ Mov(z12, z31);
19221 __ Sqrdcmlah(z12.VnS(), z12.VnS(), z0.VnS(), z1.VnS(), 180);
19244 ASSERT_EQUAL_SVE(zd_180_expected, z12.VnS());
19249 ASSERT_EQUAL_SVE(z16, z12);
19704 __ Usdot(z12.VnS(), z4.VnS(), z11.VnB(), z10.VnB());
19722 ASSERT_EQUAL_SVE(z4, z12);