Lines matching refs:vd (VIXL AArch64 MacroAssembler: the Movi and Fmov immediate-materialisation paths)

1024 void MacroAssembler::Movi16bitHelper(const VRegister& vd, uint64_t imm) {
1029 movi(vd.Is64Bits() ? vd.V8B() : vd.V16B(), byte1);
1031 movi(vd, byte2, LSL, 8);
1033 movi(vd, byte1);
1035 mvni(vd, ~byte2 & 0xff, LSL, 8);
1037 mvni(vd, ~byte1 & 0xff);
1042 dup(vd, temp);
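
The matches above (1024-1042) are Movi16bitHelper: a 16-bit pattern replicated across H lanes costs one MOVI when either byte is 0x00, one MVNI when either byte is 0xff, and otherwise a MOVZ into a scratch register followed by DUP. A standalone sketch of that case analysis (hypothetical helper, not VIXL code; it only models which form the macro assembler picks, in the same order as the matches):

    #include <cstdint>
    #include <cstdio>

    // Models Movi16bitHelper's selection order for a 16-bit replicated
    // immediate. byte1 is the low byte, byte2 the high byte.
    const char* Select16bitForm(uint64_t imm) {
      int byte1 = imm & 0xff;
      int byte2 = (imm >> 8) & 0xff;
      if (byte1 == byte2) return "movi vd.16b, #byte1 (byte splat)";
      if (byte1 == 0)     return "movi vd, #byte2, lsl #8";
      if (byte2 == 0)     return "movi vd, #byte1";
      if (byte1 == 0xff)  return "mvni vd, #(~byte2 & 0xff), lsl #8";
      if (byte2 == 0xff)  return "mvni vd, #(~byte1 & 0xff)";
      return "movz temp, #imm; dup vd, temp (general case)";
    }

    int main() {
      for (uint64_t imm : {0x5555, 0x1200, 0x0034, 0x12ff, 0xff34, 0x1234}) {
        std::printf("0x%04x -> %s\n", (unsigned)imm, Select16bitForm(imm));
      }
    }
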
1047 void MacroAssembler::Movi32bitHelper(const VRegister& vd, uint64_t imm) {
1064 movi(vd.Is64Bits() ? vd.V1D() : vd.V2D(), ((imm << 32) | imm));
1072 movi(vd, bytes[i], LSL, i * 8);
1081 mvni(vd, ~bytes[i] & 0xff, LSL, i * 8);
1088 movi(vd, bytes[2], MSL, 16);
1094 movi(vd, bytes[1], MSL, 8);
1100 mvni(vd, ~bytes[2] & 0xff, MSL, 16);
1105 mvni(vd, ~bytes[1] & 0xff, MSL, 8);
1111 Movi16bitHelper(vd.Is64Bits() ? vd.V4H() : vd.V8H(), imm & 0xffff);
1120 dup(vd, temp);
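
The 32-bit helper (1047-1120) tries progressively weaker patterns: all bytes 0x00/0xff (one 64-bit-lane MOVI with the word replicated into both halves, 1064), a single non-zero byte (MOVI with LSL, 1072), a single non-0xff byte (MVNI with LSL, 1081), the MSL forms (1088-1105), equal 16-bit halves (recurse into Movi16bitHelper, 1111), then MOV plus DUP (1120). The MSL cases are the least obvious: MOVI with MSL ("shifting ones") fills every bit below the shifted byte with ones, so 0x0000MMFF and 0x00MMFFFF take one instruction, and MVNI with MSL covers their complements 0xFFFFMM00 and 0xFFMM0000. A standalone model of those four tests (hypothetical function; the masks are copied from the matches above):

    #include <cstdint>
    #include <cstdio>

    // Models the four MSL cases of Movi32bitHelper. MM stands for the one
    // arbitrary byte; every other bit is forced by the MSL encoding.
    const char* SelectMslForm(uint32_t imm) {
      if ((imm & 0xff00ffff) == 0x0000ffff)  // 0x00MMFFFF
        return "movi vd, #bytes[2], msl #16";
      if ((imm & 0xffff00ff) == 0x000000ff)  // 0x0000MMFF
        return "movi vd, #bytes[1], msl #8";
      if ((imm & 0xff00ffff) == 0xff000000)  // 0xFFMM0000
        return "mvni vd, #(~bytes[2] & 0xff), msl #16";
      if ((imm & 0xffff00ff) == 0xffff0000)  // 0xFFFFMM00
        return "mvni vd, #(~bytes[1] & 0xff), msl #8";
      return "no MSL form; later cases apply";
    }

    int main() {
      for (uint32_t imm : {0x0042ffffu, 0x000042ffu, 0xff420000u,
                           0xffff4200u, 0x12345678u}) {
        std::printf("0x%08x -> %s\n", imm, SelectMslForm(imm));
      }
    }
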
1125 void MacroAssembler::Movi64bitHelper(const VRegister& vd, uint64_t imm) {
1137 movi(vd, imm);
1144 Movi32bitHelper(vd.Is64Bits() ? vd.V2S() : vd.V4S(), imm & 0xffffffff);
1153 if (vd.Is1D()) {
1154 fmov(vd.D(), temp);
1156 dup(vd.V2D(), temp);
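
The 64-bit helper (1125-1156) has only three tiers: if every byte of the immediate is 0x00 or 0xff it fits MOVI's 64-bit form, which encodes one bit per byte (1137); if the two 32-bit halves are equal it delegates to Movi32bitHelper on S lanes (1144); otherwise it builds the value in an X scratch register and moves it across with FMOV for a scalar D destination or DUP for V2D (1153-1156). The first test as a standalone sketch (hypothetical function):

    #include <cstdint>
    #include <cstdio>

    // True when a 64-bit immediate fits MOVI's byte-mask form, i.e. every
    // byte is 0x00 or 0xff.
    bool AllBytes0orFF(uint64_t imm) {
      for (int i = 0; i < 8; i++) {
        int byteval = (imm >> (i * 8)) & 0xff;
        if (byteval != 0 && byteval != 0xff) return false;
      }
      return true;
    }

    int main() {
      std::printf("%d %d %d\n",
                  AllBytes0orFF(0xff00ff00ff00ff00),   // 1: single movi
                  AllBytes0orFF(0x0123456701234567),   // 0: equal halves, 32-bit path
                  AllBytes0orFF(0x0123456789abcdef));  // 0: mov + dup/fmov
    }
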
1162 void MacroAssembler::Movi(const VRegister& vd,
1169 movi(vd, imm, shift, shift_amount);
1170 } else if (vd.Is8B() || vd.Is16B()) {
1173 movi(vd, imm);
1174 } else if (vd.Is4H() || vd.Is8H()) {
1176 Movi16bitHelper(vd, imm);
1177 } else if (vd.Is2S() || vd.Is4S()) {
1179 Movi32bitHelper(vd, imm);
1182 Movi64bitHelper(vd, imm);
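
The public Movi (1162-1182) is a thin dispatcher: an explicit shift goes straight to the raw movi (1169), and otherwise the lane arrangement of vd selects the helper, so callers only name the format they want. A minimal usage sketch, assuming the setup of VIXL's own examples directory (header path, namespace and register names as in upstream VIXL; adjust for your checkout):

    #include "aarch64/macro-assembler-aarch64.h"

    using namespace vixl::aarch64;

    void GenerateMoviExamples(MacroAssembler* masm) {
      masm->Movi(v0.V16B(), 0x55);               // 8-bit lanes: direct movi
      masm->Movi(v1.V8H(), 0x12ff);              // 16-bit lanes: Movi16bitHelper
      masm->Movi(v2.V4S(), 0x0042ffff);          // 32-bit lanes: Movi32bitHelper
      masm->Movi(v3.V2D(), 0x0123456789abcdef);  // 64-bit lanes: Movi64bitHelper
      masm->Movi(v4.V4S(), 0x42, LSL, 8);        // explicit shift: bypasses helpers
    }
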
1187 void MacroAssembler::Movi(const VRegister& vd, uint64_t hi, uint64_t lo) {
1189 VIXL_ASSERT(vd.Is128Bits());
1191 Movi(vd.V2D(), lo);
1195 Movi(vd.V1D(), lo);
1203 Ins(vd.V2D(), 1, temp);
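
The 128-bit overload (1187-1203) builds a Q register in halves: equal halves collapse to a single replicated Movi on V2D (1191); otherwise the low half is materialised with Movi on V1D (1195) and a non-zero high half is inserted into lane 1 from a scratch register (1203). Continuing the hedged sketch above (same include and namespace assumed; the values are arbitrary):

    void GenerateMovi128Examples(MacroAssembler* masm) {
      // hi == lo: one replicated 64-bit-lane Movi.
      masm->Movi(v5.V2D(), 0x0123456789abcdef, 0x0123456789abcdef);
      // hi != lo: Movi for the low half, then Mov + Ins for lane 1.
      masm->Movi(v6.V2D(), 0x0123456789abcdef, 0xfedcba9876543210);
    }
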
1633 void MacroAssembler::Fmov(VRegister vd, double imm) {
1640 fmov(vd.D(), xzr);
1644 if (vd.Is1H() || vd.Is4H() || vd.Is8H()) {
1645 Fmov(vd, Float16(imm));
1649 if (vd.Is1S() || vd.Is2S() || vd.Is4S()) {
1650 Fmov(vd, static_cast<float>(imm));
1654 VIXL_ASSERT(vd.Is1D() || vd.Is2D());
1656 fmov(vd, imm);
1657 } else if (vd.IsScalar()) {
1658 ldr(vd,
1668 Movi(vd, rawbits);
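
Fmov(vd, double) (1633-1668) handles an all-zero bit pattern up front by moving xzr in with fmov (1640), then re-dispatches on lane size: H-lane destinations re-enter via Float16 (1645) and S-lane destinations via float (1650). For D lanes (1654) there are three outcomes: a value that fits the 8-bit FP immediate encoding becomes a real fmov (1656), other scalar values are loaded from the literal pool (1658), and vector destinations fall back to Movi of the raw bits (1668). The gate is AArch64's VFPExpandImm rule: sign, 3-bit exponent, 4-bit fraction, everything else forced. A standalone model of the double-precision test (the function name is ours; the bit checks mirror the architectural encoding):

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    // True when a double fits FMOV's 8-bit immediate: bit pattern
    // aBbb.bbbb.bbcd.efgh followed by 48 zero bits.
    bool FitsFP8Immediate(double imm) {
      uint64_t bits;
      std::memcpy(&bits, &imm, sizeof(bits));
      if ((bits & UINT64_C(0x0000ffffffffffff)) != 0) return false;  // bits[47:0] clear
      uint64_t b = (bits >> 48) & 0x3fc0;                            // bits[61:54]
      if (b != 0 && b != 0x3fc0) return false;                       // all equal
      if (((bits ^ (bits << 1)) & (UINT64_C(1) << 62)) == 0)         // bit62 != bit61
        return false;
      return true;
    }

    int main() {
      std::printf("%d %d %d\n",
                  FitsFP8Immediate(1.0),     // 1: fmov vd, #1.0
                  FitsFP8Immediate(-0.125),  // 1: fmov vd, #-0.125
                  FitsFP8Immediate(1.1));    // 0: literal pool / Movi
    }
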
1673 void MacroAssembler::Fmov(VRegister vd, float imm) {
1680 fmov(vd.S(), wzr);
1684 if (vd.Is1H() || vd.Is4H() || vd.Is8H()) {
1685 Fmov(vd, Float16(imm));
1689 if (vd.Is1D() || vd.Is2D()) {
1690 Fmov(vd, static_cast<double>(imm));
1694 VIXL_ASSERT(vd.Is1S() || vd.Is2S() || vd.Is4S());
1696 fmov(vd, imm);
1697 } else if (vd.IsScalar()) {
1698 ldr(vd,
1708 Movi(vd, rawbits);
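
The float overload (1673-1708) mirrors the double one with the roles swapped: the zero shortcut uses wzr (1680), H destinations re-enter via Float16 (1685), D destinations widen via static_cast<double> (1690), and S lanes (1694) get fmov / literal pool / Movi with the same structure. The single-precision encodability test only moves the field boundaries (function name ours again):

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    // True when a float fits FMOV's 8-bit immediate: bit pattern
    // aBbb.bbbc.defg.h followed by 19 zero bits.
    bool FitsFP8ImmediateF32(float imm) {
      uint32_t bits;
      std::memcpy(&bits, &imm, sizeof(bits));
      if ((bits & 0x7ffff) != 0) return false;                      // bits[18:0] clear
      uint32_t b = (bits >> 16) & 0x3e00;                           // bits[29:25]
      if (b != 0 && b != 0x3e00) return false;                      // all equal
      if (((bits ^ (bits << 1)) & 0x40000000) == 0) return false;   // bit30 != bit29
      return true;
    }

    int main() {
      std::printf("%d %d\n",
                  FitsFP8ImmediateF32(0.5f),   // 1: fmov vd, #0.5
                  FitsFP8ImmediateF32(0.1f));  // 0: literal pool / Movi
    }
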
1713 void MacroAssembler::Fmov(VRegister vd, Float16 imm) {
1717 if (vd.Is1S() || vd.Is2S() || vd.Is4S()) {
1718 Fmov(vd, FPToFloat(imm, kIgnoreDefaultNaN));
1722 if (vd.Is1D() || vd.Is2D()) {
1723 Fmov(vd, FPToDouble(imm, kIgnoreDefaultNaN));
1727 VIXL_ASSERT(vd.Is1H() || vd.Is4H() || vd.Is8H());
1730 fmov(vd, imm);
1732 if (vd.IsScalar()) {
1734 fmov(vd, wzr);
1740 Fmov(vd, temp);
1744 Movi(vd, static_cast<uint64_t>(rawbits));
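
The Float16 overload (1713-1744) closes the loop: S and D destinations widen the half via FPToFloat/FPToDouble (1718, 1723), H lanes take a real fmov when the half fits the immediate encoding (1730), a scalar with zero raw bits uses wzr (1734), other scalars are moved in through a scratch register (1740), and vector H destinations hand the 16 raw bits to Movi (1744), i.e. back into Movi16bitHelper at the top of this listing. The half-precision encodability test, derived from the same VFPExpandImm rule (function name ours):

    #include <cstdint>
    #include <cstdio>

    // True when a half-precision bit pattern fits FMOV's 8-bit immediate:
    // aBbb.cdef.gh00.0000, i.e. sign, bits[13:12] equal, bit14 their
    // complement, 4-bit fraction at bits[9:6], bits[5:0] zero.
    bool FitsFP8ImmediateF16(uint16_t bits) {
      if ((bits & 0x3f) != 0) return false;                     // bits[5:0] clear
      uint16_t b = (bits >> 12) & 0x3;                          // bits[13:12]
      if (b != 0 && b != 0x3) return false;                     // all equal
      if (((bits ^ (bits << 1)) & 0x4000) == 0) return false;   // bit14 != bit13
      return true;
    }

    int main() {
      std::printf("%d %d %d\n",
                  FitsFP8ImmediateF16(0x3c00),   // 1: 1.0 -> fmov vd, #1.0
                  FitsFP8ImmediateF16(0x0000),   // 0: +0.0 -> fmov vd, wzr
                  FitsFP8ImmediateF16(0x3555));  // 0: ~0.333 -> scratch / Movi
    }
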