Lines Matching defs:src
8 #include "src/base/macros.h"
9 #include "src/codegen/cpu-features.h"
10 #include "src/codegen/external-reference.h"
11 #include "src/codegen/turbo-assembler.h"
14 #include "src/codegen/ia32/register-ia32.h"
16 #include "src/codegen/x64/register-x64.h"
43 void Move(Register dst, uint32_t src);
45 void Move(Register dst, Register src);
46 void Add(Register dst, Immediate src);
47 void And(Register dst, Immediate src);
72 void Pshufb(XMMRegister dst, XMMRegister src, Op mask) {
75 vpshufb(dst, src, mask);
79 if (dst != src) {
80 movaps(dst, src);
122 // check that dst == first src.
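The Pshufb wrapper above is the template for most ops in this header: when AVX is available, emit the three-operand vpshufb; otherwise copy src into dst first (the two-operand SSE forms overwrite their first source, hence the "dst == first src" note) and emit pshufb. A minimal standalone model of that dispatch, with hypothetical enum/printf stand-ins for V8's registers and assembler:

    #include <cstdio>

    // Hypothetical stand-ins; real code uses V8's XMMRegister and assembler.
    enum XMM { xmm0, xmm1, xmm2 };
    bool have_avx = false;  // assumption: probed via CpuFeatures in real code

    void Pshufb(XMM dst, XMM src, XMM mask) {
      if (have_avx) {
        std::printf("vpshufb xmm%d, xmm%d, xmm%d\n", dst, src, mask);  // 3-operand AVX
        return;
      }
      if (dst != src) std::printf("movaps xmm%d, xmm%d\n", dst, src);  // copy first
      std::printf("pshufb xmm%d, xmm%d\n", dst, mask);  // 2-operand SSE, dst is also src
    }

    int main() {
      Pshufb(xmm0, xmm1, xmm2);  // SSE path: movaps then pshufb
      have_avx = true;
      Pshufb(xmm0, xmm1, xmm2);  // AVX path: single vpshufb
    }
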
402 void F64x2ExtractLane(DoubleRegister dst, XMMRegister src, uint8_t lane);
403 void F64x2ReplaceLane(XMMRegister dst, XMMRegister src, DoubleRegister rep,
409 void F32x4Splat(XMMRegister dst, DoubleRegister src);
410 void F32x4ExtractLane(FloatRegister dst, XMMRegister src, uint8_t lane);
415 void S128Store32Lane(Operand dst, XMMRegister src, uint8_t laneidx);
416 void I8x16Splat(XMMRegister dst, Register src, XMMRegister scratch);
417 void I8x16Splat(XMMRegister dst, Operand src, XMMRegister scratch);
430 void I16x8Splat(XMMRegister dst, Register src);
431 void I16x8Splat(XMMRegister dst, Operand src);
438 void I16x8SConvertI8x16High(XMMRegister dst, XMMRegister src);
439 void I16x8UConvertI8x16High(XMMRegister dst, XMMRegister src,
444 void I32x4ExtAddPairwiseI16x8U(XMMRegister dst, XMMRegister src,
449 void I32x4SConvertI16x8High(XMMRegister dst, XMMRegister src);
450 void I32x4UConvertI16x8High(XMMRegister dst, XMMRegister src,
452 void I64x2Neg(XMMRegister dst, XMMRegister src, XMMRegister scratch);
453 void I64x2Abs(XMMRegister dst, XMMRegister src, XMMRegister scratch);
458 void I64x2ShrS(XMMRegister dst, XMMRegister src, uint8_t shift,
460 void I64x2ShrS(XMMRegister dst, XMMRegister src, Register shift,
467 void I64x2SConvertI32x4High(XMMRegister dst, XMMRegister src);
468 void I64x2UConvertI32x4High(XMMRegister dst, XMMRegister src,
470 void S128Not(XMMRegister dst, XMMRegister src, XMMRegister scratch);
474 void S128Load8Splat(XMMRegister dst, Operand src, XMMRegister scratch);
475 void S128Load16Splat(XMMRegister dst, Operand src, XMMRegister scratch);
476 void S128Load32Splat(XMMRegister dst, Operand src);
477 void S128Store64Lane(Operand dst, XMMRegister src, uint8_t laneidx);
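Several of the declarations above, e.g. I16x8SConvertI8x16High, widen the high half of a vector. As a semantic reference only, with plain arrays standing in for XMM lanes (the real code emits shuffle/sign-extend instruction sequences, whose exact choice is not visible in these matches):

    #include <cstdint>
    #include <cstdio>

    // Scalar reference for I16x8SConvertI8x16High: sign-extend the high
    // 8 bytes of a 128-bit vector into eight 16-bit lanes.
    void I16x8SConvertI8x16High(int16_t dst[8], const int8_t src[16]) {
      for (int i = 0; i < 8; ++i) dst[i] = src[8 + i];  // implicit sign extension
    }

    int main() {
      int8_t src[16] = {0};
      src[8] = -1;
      src[15] = 127;
      int16_t dst[8];
      I16x8SConvertI8x16High(dst, src);
      std::printf("%d %d\n", dst[0], dst[7]);  // -1 127
    }
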
519 void I8x16SplatPreAvx2(XMMRegister dst, Op src, XMMRegister scratch);
521 void I16x8SplatPreAvx2(XMMRegister dst, Op src);
542 void Abspd(XMMRegister dst, XMMRegister src, Register tmp) {
543 FloatUnop(dst, src, tmp, &SharedTurboAssembler::Andps,
547 void Absps(XMMRegister dst, XMMRegister src, Register tmp) {
548 FloatUnop(dst, src, tmp, &SharedTurboAssembler::Andps,
552 void Negpd(XMMRegister dst, XMMRegister src, Register tmp) {
553 FloatUnop(dst, src, tmp, &SharedTurboAssembler::Xorps,
557 void Negps(XMMRegister dst, XMMRegister src, Register tmp) {
558 FloatUnop(dst, src, tmp, &SharedTurboAssembler::Xorps,
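Abspd/Absps and Negpd/Negps above are pure bit tricks: AND with a mask that clears the sign bit for abs, XOR with a mask that sets only the sign bit for neg; the masks come from external references. A scalar sketch of the same masks, one float lane, with the constants inlined:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Scalar model of Absps/Negps: abs clears the float sign bit (andps
    // with 0x7fffffff per lane), neg flips it (xorps with 0x80000000).
    float Absps(float x) {
      uint32_t bits;
      std::memcpy(&bits, &x, sizeof bits);
      bits &= 0x7fffffffu;
      std::memcpy(&x, &bits, sizeof x);
      return x;
    }
    float Negps(float x) {
      uint32_t bits;
      std::memcpy(&bits, &x, sizeof bits);
      bits ^= 0x80000000u;
      std::memcpy(&x, &bits, sizeof x);
      return x;
    }

    int main() {
      std::printf("%g %g\n", Absps(-2.5f), Negps(2.5f));  // 2.5 -2.5
    }
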
563 void Pextrd(Register dst, XMMRegister src, uint8_t imm8) {
565 Movd(dst, src);
571 vpextrd(dst, src, imm8);
574 pextrd(dst, src, imm8);
577 impl()->PextrdPreSse41(dst, src, imm8);
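Pextrd above layers three fallbacks: lane 0 needs only a Movd, SSE4.1/AVX have a dedicated extract instruction, and older CPUs fall through to PextrdPreSse41. Modeled as plain control flow (printf stand-ins; the exact pre-SSE4.1 sequence is my assumption, not visible in the matches):

    #include <cstdio>

    // Hypothetical feature flags; V8 queries CpuFeatures::IsSupported().
    bool have_avx = false, have_sse41 = false;

    void Pextrd(int dst_gp, int src_xmm, unsigned imm8) {
      if (imm8 == 0) {
        std::printf("movd r%d, xmm%d\n", dst_gp, src_xmm);  // lane 0 is just a movd
      } else if (have_avx) {
        std::printf("vpextrd r%d, xmm%d, %u\n", dst_gp, src_xmm, imm8);
      } else if (have_sse41) {
        std::printf("pextrd r%d, xmm%d, %u\n", dst_gp, src_xmm, imm8);
      } else {
        std::printf("PextrdPreSse41\n");  // e.g. shuffle the lane down, then movd
      }
    }

    int main() {
      Pextrd(0, 1, 0);  // movd
      Pextrd(0, 1, 2);  // pre-SSE4.1 fallback
    }
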
597 void Pinsrd(XMMRegister dst, Op src, uint8_t imm8,
599 Pinsrd(dst, dst, src, imm8, load_pc_offset);
602 void F64x2ConvertLowI32x4U(XMMRegister dst, XMMRegister src,
608 if (!CpuFeatures::IsSupported(AVX) && dst != src) {
609 movaps(dst, src);
610 src = dst;
612 Unpcklps(dst, src,
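The Unpcklps against a constant in F64x2ConvertLowI32x4U matches the shape of the standard uint32-to-double bias trick: splice each uint32 under the exponent bits of 2^52, then subtract 2^52 (0x4330000000000000 is the bit pattern of 2^52). A scalar version of that trick:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Scalar model of the bias trick: 2^52 + x is exactly representable
    // for any uint32 x, so splicing x into the low bits of 2^52 and then
    // subtracting 2^52 recovers x as an exact double. The unpcklps above
    // performs the splice for two lanes at once against a constant vector.
    double Uint32ToDouble(uint32_t x) {
      uint64_t bits = 0x4330000000000000ull | x;  // double value: 2^52 + x
      double d;
      std::memcpy(&d, &bits, sizeof d);
      return d - 4503599627370496.0;  // subtract 2^52
    }

    int main() {
      std::printf("%.0f\n", Uint32ToDouble(0xffffffffu));  // 4294967295
    }
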
622 void I32x4SConvertF32x4(XMMRegister dst, XMMRegister src, XMMRegister tmp,
638 vcmpeqps(tmp, src, src);
639 vandps(dst, src, tmp);
640 vcmpgeps(tmp, src, op);
644 if (src == dst) {
645 movaps(tmp, src);
654 cmpleps(tmp, src);
655 cvttps2dq(dst, src);
657 movaps(tmp, src);
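I32x4SConvertF32x4 exists because cvttps2dq yields 0x80000000 for NaN and out-of-range lanes, while the wasm operation wants NaN -> 0 and saturation; the cmpeqps/andps/cmpgeps sequence builds those fix-ups. A scalar statement of the target semantics, one lane:

    #include <cstdint>
    #include <cstdio>
    #include <cmath>

    // Scalar reference for the saturating semantics: NaN -> 0, overflow
    // clamps to INT32_MAX/INT32_MIN. The SIMD code reaches the same result
    // by masking NaN lanes to zero and repairing positive overflow.
    int32_t SatF32ToI32(float x) {
      if (std::isnan(x)) return 0;
      if (x >= 2147483648.0f) return INT32_MAX;
      if (x <= -2147483648.0f) return INT32_MIN;
      return static_cast<int32_t>(x);
    }

    int main() {
      std::printf("%d %d %d\n", SatF32ToI32(NAN), SatF32ToI32(3e9f),
                  SatF32ToI32(-1.5f));  // 0 2147483647 -1
    }
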
664 void I32x4TruncSatF64x2SZero(XMMRegister dst, XMMRegister src,
670 // Make sure we don't overwrite src.
671 if (dst == src) {
672 DCHECK_NE(src, scratch);
675 // dst = 0 if src == NaN, else all ones.
676 vcmpeqpd(dst, src, src);
677 // dst = 0 if src == NaN, else INT32_MAX as double.
682 // dst = 0 if src == NaN, else src saturated to INT32_MAX as double.
683 vminpd(dst, src, dst);
688 if (dst != src) {
689 movaps(dst, src);
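The same shape for f64 lanes, as the comments above spell out: cmpeqpd of src with itself yields a non-NaN mask, which gates an INT32_MAX-as-double constant, and minpd clamps before the final truncating convert (which already saturates the negative side). Scalar reference for one lane:

    #include <cstdint>
    #include <cstdio>
    #include <cmath>

    // Scalar reference for i32x4.trunc_sat_f64x2_s_zero on one lane:
    // NaN -> 0, clamp to int32 range, truncate toward zero.
    int32_t TruncSatF64ToI32(double x) {
      if (std::isnan(x)) return 0;
      if (x >= 2147483647.0) return INT32_MAX;   // the minpd clamp above
      if (x <= -2147483648.0) return INT32_MIN;  // cvttpd2dq saturates this side
      return static_cast<int32_t>(x);
    }

    int main() {
      std::printf("%d %d\n", TruncSatF64ToI32(1e100), TruncSatF64ToI32(-0.9));
      // 2147483647 0
    }
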
701 void I32x4TruncSatF64x2UZero(XMMRegister dst, XMMRegister src,
708 vmaxpd(dst, src, scratch);
725 if (dst != src) {
726 movaps(dst, src);
741 void I32x4TruncF64x2UZero(XMMRegister dst, XMMRegister src, Register tmp,
745 if (dst != src && !CpuFeatures::IsSupported(AVX)) {
746 movaps(dst, src);
747 src = dst;
750 Roundpd(dst, src, kRoundToZero);
760 void I32x4TruncF32x4U(XMMRegister dst, XMMRegister src, Register scratch,
767 vcmpltps(tmp, src, int32_overflow_op);
769 movaps(tmp, src);
773 Pand(tmp, src);
774 // tmp = src in the lanes that convert validly (below 2^31), 0 elsewhere
775 if (dst != src) {
776 Movaps(dst, src);
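I32x4TruncF32x4U has to split the unsigned range in two, since cvttps2dq only produces signed int32: lanes below 2^31 (the cmpltps mask above) convert directly, and larger lanes are biased down by 2^31, converted, and fixed up with an integer add of 2^31. Scalar reference for in-range inputs (how out-of-range lanes are handled is not visible in these matches):

    #include <cstdint>
    #include <cstdio>

    // Scalar reference for the two-range split behind I32x4TruncF32x4U.
    uint32_t TruncF32ToU32(float x) {
      if (x < 2147483648.0f) {
        return static_cast<uint32_t>(static_cast<int32_t>(x));  // small range
      }
      // big range: bias into signed range, convert, add the bias back
      return static_cast<uint32_t>(static_cast<int32_t>(x - 2147483648.0f)) +
             0x80000000u;
    }

    int main() {
      std::printf("%u %u\n", TruncF32ToU32(3.0f), TruncF32ToU32(4000000000.0f));
      // 3 4000000000
    }
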
803 void I32x4ExtAddPairwiseI16x8S(XMMRegister dst, XMMRegister src,
808 // pmaddwd multiplies signed words in src and op, producing signed 32-bit
810 // src = |a|b|c|d|e|f|g|h|
812 if (!CpuFeatures::IsSupported(AVX) && (dst != src)) {
813 movaps(dst, src);
814 src = dst;
817 Pmaddwd(dst, src, op);
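The pmaddwd trick the comments above describe: multiplying by a vector of 16-bit 1s turns pmaddwd's multiply-and-add-adjacent-pairs into exactly a pairwise extending add. Scalar reference:

    #include <cstdint>
    #include <cstdio>

    // Scalar reference for I32x4ExtAddPairwiseI16x8S: each pair of int16
    // lanes is multiplied by 1 and summed into one int32 lane.
    void ExtAddPairwiseI16x8S(int32_t dst[4], const int16_t src[8]) {
      for (int i = 0; i < 4; ++i) {
        dst[i] = int32_t{src[2 * i]} * 1 + int32_t{src[2 * i + 1]} * 1;
      }
    }

    int main() {
      int16_t src[8] = {32767, 32767, -32768, -1, 5, 7, 0, 0};
      int32_t dst[4];
      ExtAddPairwiseI16x8S(dst, src);
      std::printf("%d %d %d %d\n", dst[0], dst[1], dst[2], dst[3]);
      // 65534 -32769 12 0
    }
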
820 void I16x8ExtAddPairwiseI8x16S(XMMRegister dst, XMMRegister src,
830 vpmaddubsw(dst, scratch, src);
833 if (dst == src) {
835 pmaddubsw(scratch, src);
839 pmaddubsw(dst, src);
844 void I16x8ExtAddPairwiseI8x16U(XMMRegister dst, XMMRegister src,
851 vpmaddubsw(dst, src, op);
854 if (dst != src) {
855 movaps(dst, src);
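Both I16x8ExtAddPairwise variants lean on pmaddubsw's asymmetry: it multiplies unsigned bytes from its first source by signed bytes from its second. With the 1s on the unsigned side, src's bytes are treated as signed (the S variant; note the swapped operands in vpmaddubsw(dst, scratch, src) above); with src on the unsigned side, they are treated as unsigned (the U variant). One pair, scalar:

    #include <cstdint>
    #include <cstdio>

    // Scalar model of one pmaddubsw pair: unsigned byte times signed byte,
    // two products summed with int16 saturation. With 1s as one operand the
    // sum cannot actually overflow.
    int16_t MaddPair(uint8_t u0, int8_t s0, uint8_t u1, int8_t s1) {
      int32_t sum = int32_t{u0} * s0 + int32_t{u1} * s1;
      if (sum > INT16_MAX) sum = INT16_MAX;
      if (sum < INT16_MIN) sum = INT16_MIN;
      return static_cast<int16_t>(sum);
    }

    int main() {
      // S variant: ones unsigned, src signed -> (-1) + (-128) = -129
      std::printf("%d\n", MaddPair(1, -1, 1, -128));
      // U variant: src unsigned, ones signed -> 255 + 128 = 383
      std::printf("%d\n", MaddPair(255, 1, 128, 1));
    }
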
861 void I8x16Swizzle(XMMRegister dst, XMMRegister src, XMMRegister mask,
867 Pshufb(dst, src, mask);
878 vpshufb(dst, src, scratch);
882 if (dst != src) {
884 movaps(dst, src);
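wasm's i8x16.swizzle zeroes lanes whose index is >= 16, but pshufb only zeroes a lane when bit 7 of the mask byte is set. The scratch mask fed to vpshufb above is therefore prepared, in lines not shown among these matches, with the standard fix-up: a per-byte saturating add of 0x70, which pushes every index above 15 into the zeroing range while leaving 0..15 usable. One lane, scalar:

    #include <cstdint>
    #include <cstdio>

    // Scalar model of the swizzle fix-up followed by pshufb's lane rule.
    uint8_t SwizzleLane(const uint8_t table[16], uint8_t index) {
      uint32_t adjusted = index + 0x70u;
      if (adjusted > 0xff) adjusted = 0xff;  // paddusb saturation
      if (adjusted & 0x80) return 0;         // pshufb zeroing rule
      return table[adjusted & 0x0f];         // pshufb low-nibble lookup
    }

    int main() {
      uint8_t t[16];
      for (int i = 0; i < 16; ++i) t[i] = uint8_t(i * 10);
      std::printf("%u %u %u\n", SwizzleLane(t, 3), SwizzleLane(t, 15),
                  SwizzleLane(t, 16));  // 30 150 0
    }
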
891 void I8x16Popcnt(XMMRegister dst, XMMRegister src, XMMRegister tmp1,
895 DCHECK_NE(src, tmp1);
897 DCHECK_NE(src, tmp2);
903 vpandn(tmp2, tmp1, src);
904 vpand(dst, tmp1, src);
917 movaps(tmp1, src);
919 if (dst != src) {
920 movaps(dst, src);
949 andps(tmp1, src);
950 andnps(tmp2, src);
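I8x16Popcnt is the classic pshufb popcount: split each byte into nibbles (the pand/pandn pair above, against a nibble mask in tmp1 loaded from a constant not shown in these matches), look each nibble up in a 16-entry bit-count table held in a register, and add the two lookups. Scalar model:

    #include <cstdint>
    #include <cstdio>

    // Scalar model: the table below is what the pshufb constant encodes.
    uint8_t PopcntByte(uint8_t b) {
      static const uint8_t kNibbleCount[16] = {0, 1, 1, 2, 1, 2, 2, 3,
                                               1, 2, 2, 3, 2, 3, 3, 4};
      return kNibbleCount[b & 0x0f] + kNibbleCount[b >> 4];
    }

    int main() {
      std::printf("%u %u %u\n", PopcntByte(0x00), PopcntByte(0xff),
                  PopcntByte(0xa5));  // 0 8 4
    }
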
971 void FloatUnop(XMMRegister dst, XMMRegister src, Register tmp,
973 if (!CpuFeatures::IsSupported(AVX) && (dst != src)) {
974 movaps(dst, src);
975 src = dst;
978 (assm->*op)(dst, src, ExternalReferenceAsOperand(ext, tmp));
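FloatUnop is the common tail of Abspd/Absps/Negpd/Negps: the mask operation arrives as a pointer-to-member (&SharedTurboAssembler::Andps and friends) and is invoked through (assm->*op)(...) with the mask as a memory operand. A compilable model of just that dispatch, with hypothetical stand-in types for V8's assembler and operands:

    #include <cstdio>

    struct Assembler {
      void Andps(int dst, int src, const char* mem) {
        std::printf("andps xmm%d, xmm%d, %s\n", dst, src, mem);
      }
      void Xorps(int dst, int src, const char* mem) {
        std::printf("xorps xmm%d, xmm%d, %s\n", dst, src, mem);
      }
    };
    using FloatOp = void (Assembler::*)(int, int, const char*);

    void FloatUnop(Assembler* assm, int dst, int src, FloatOp op,
                   const char* mask) {
      (assm->*op)(dst, src, mask);  // dispatch through the member pointer
    }

    int main() {
      Assembler a;
      FloatUnop(&a, 0, 1, &Assembler::Andps, "[abs_mask]");  // Absps shape
      FloatUnop(&a, 0, 1, &Assembler::Xorps, "[neg_mask]");  // Negps shape
    }
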