/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/X86/ |
X86InstrFoldTables.cpp | The X86 memory folding tables, pairing a register-form opcode with its memory-operand form and a flag word, e.g. { X86::ADD16ri_DB, X86::ADD16mi, TB_NO_REVERSE }; the comment notes the tables must stay ordered by opcode because new instructions are added into holes in the X86 opcode map.
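The entries above pair a register-form opcode with its memory-form twin plus flags, and the ordering requirement exists so the table can be searched quickly. Below is a minimal standalone sketch of that lookup scheme; the struct, the opcode numbers, and the helper names are illustrative stand-ins, not LLVM's actual types.

```cpp
// Minimal standalone sketch (not LLVM's actual code): a folding table maps a
// register-form opcode to its memory-form equivalent plus flags, and is kept
// sorted by the register opcode so lookups can binary-search it.
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <iterator>

struct FoldTableEntry {
  uint16_t RegOp;  // opcode of the register form (hypothetical numeric ids)
  uint16_t MemOp;  // opcode of the folded, memory-operand form
  uint16_t Flags;  // e.g. TB_NO_REVERSE: folding is one-way only
};

constexpr uint16_t TB_NO_REVERSE = 1 << 0;

// The table must stay sorted by RegOp, mirroring the ordering requirement
// stated in the real file.
constexpr FoldTableEntry FoldTable[] = {
    {100 /*ADD16ri_DB*/, 200 /*ADD16mi*/, TB_NO_REVERSE},
    {101 /*ADD16rr_DB*/, 201 /*ADD16mr*/, TB_NO_REVERSE},
    {150 /*MOV32rr*/,    250 /*MOV32mr*/, 0},
};

const FoldTableEntry *lookupFoldEntry(uint16_t RegOp) {
  auto It = std::lower_bound(
      std::begin(FoldTable), std::end(FoldTable), RegOp,
      [](const FoldTableEntry &E, uint16_t Op) { return E.RegOp < Op; });
  if (It != std::end(FoldTable) && It->RegOp == RegOp)
    return It;
  return nullptr;  // no memory form known for this opcode
}

int main() {
  if (const FoldTableEntry *E = lookupFoldEntry(101))
    std::printf("reg opcode %u folds to mem opcode %u\n",
                unsigned(E->RegOp), unsigned(E->MemOp));
}
```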
X86InstrInfo.cpp | The X86 implementation of the TargetInstrInfo class; its constructor selects the call-stack adjustment opcodes (X86::ADJCALLSTACKDOWN64/ADJCALLSTACKUP64 on LP64 targets, the 32-bit variants otherwise) and X86::CATCHRET, choosing by subtarget.
X86SpeculativeLoadHardening.cpp | The "X86 speculative load hardening" pass; it scans blocks for vulnerable loads (treating X86::LFENCE and X86::MFENCE as barriers), keeps its predicate/poison state in an X86::GR64_NOSP virtual register, and seeds it in the entry block with X86::MOV64ri32 followed by an X86::LFENCE.
X86MCInstLower.cpp | Lowers X86 MachineInstrs to their corresponding MCInsts; it also simplifies short immediate forms (only when the register is AL/AX/EAX/RAX) and rewrites MOVSX into shorter forms, e.g. movsbw %al, %ax becomes cbtw.
X86FloatingPoint.cpp | The "X86 FP Stackifier" pass; it maps the virtual registers X86::FP0-FP6 onto the x87 register stack, tracking which X86::FP<n> register sits in each ST(i) slot; getSTReg computes StackTop - 1 - getSlot(RegNo) + X86::ST0.
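The getSTReg arithmetic quoted above is the heart of the stackifier's bookkeeping: the slot a virtual FP register occupies, together with the current stack depth, determines which physical ST(i) it currently lives in. A small self-contained sketch of that bookkeeping, using hypothetical helper names and no LLVM types:

```cpp
// Standalone sketch of the FP-stackifier bookkeeping: each live FP<n> register
// occupies a slot, and the ST(i) register it currently maps to is derived from
// its slot and the stack depth, mirroring
//   getSTReg(Reg) = StackTop - 1 - getSlot(Reg) + ST0.
#include <cassert>
#include <cstdio>

constexpr unsigned NumFPRegs = 7;   // FP0..FP6 in the real pass
unsigned Stack[NumFPRegs];          // Stack[i]  = which FP reg is in slot i
unsigned RegMap[NumFPRegs];         // RegMap[r] = slot of FP reg r
unsigned StackTop = 0;              // current depth of the x87 stack

void pushReg(unsigned FPReg) {      // simulate loading a value onto ST(0)
  assert(StackTop < NumFPRegs && "x87 stack overflow");
  RegMap[FPReg] = StackTop;
  Stack[StackTop++] = FPReg;
}

unsigned getSlot(unsigned FPReg) { return RegMap[FPReg]; }

// Returns i such that FPReg currently lives in ST(i): the most recently pushed
// register is ST(0), the one below it ST(1), and so on.
unsigned getSTIndex(unsigned FPReg) { return StackTop - 1 - getSlot(FPReg); }

int main() {
  pushReg(3);                       // FP3 pushed first -> deeper in the stack
  pushReg(5);                       // FP5 pushed last  -> ST(0)
  std::printf("FP5 is ST(%u), FP3 is ST(%u)\n", getSTIndex(5), getSTIndex(3));
}
```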
X86AvoidStoreForwardingBlocks.cpp | The "X86 Avoid Store Forwarding Blocks" pass; it recognizes XMM load opcodes (X86::MOVUPSrm, MOVAPSrm, VMOVUPSrm, VMOVAPSrm, VMOVUPDrm, VMOVAPDrm, ...) and exposes cl::opt flags to disable the fixup or bound how far backward it searches.
X86RegisterInfo.cpp | The X86 implementation of the TargetRegisterInfo class, describing register handling on X86; the constructor picks X86::RIP or X86::EIP as the program counter and the 64-bit or 32-bit stack and frame pointer registers (X86::RSP vs. X86::ESP, and the matching frame pointer) depending on the target.
X86FlagsCopyLowering.cpp | The "X86 EFLAGS copy lowering" pass; it rewrites copies of EFLAGS by materializing the needed X86::CondCode values into a CondRegArray (std::array<unsigned, X86::LAST_VALID_COND + 1>) of condition registers, with a macro-generated mnemonic table covering the 8/16/32/64-bit opcode variants.
X86ExpandPseudo.cpp | The "X86 pseudo instruction expansion pass"; for example, ExpandICallBranchFunnel lowers indirect-call branch funnels into X86::LEA64r of X86::RIP into X86::R11 followed by X86::CMP64rr, adding X86::EFLAGS as a live-in where needed.
X86DomainReassignment.cpp | Reassigns virtual registers between the general-purpose domain (the X86::GR8/GR16/GR32/GR64 register classes) and the AVX-512 mask domain (the X86::VK8/VK16/... register classes); the reassignment can be disabled with an X86-specific flag.
X86InstructionSelector.cpp | Instruction selection for the X86 target ("X86-isel"); getRegClass maps a register bank and size to a register class (GR8/GR16/GR32/GR64 for the GPR bank, FR32X and related classes for the vector bank when AVX-512 is available).
X86EvexToVex.cpp | Compresses EVEX-encoded instructions to VEX where legal: usesExtendedRegister reports any use of X86::XMM16-XMM31 or X86::YMM16-YMM31 (ZMM registers are asserted to have been excluded already), which blocks the compression, and performCustomAdjustments handles special cases such as the VALIGNDZ128 forms.
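The usesExtendedRegister checks above reduce to a range test on the register number: XMM16-31 and YMM16-31 are only encodable with an EVEX prefix, so any use of them blocks compression to VEX. A standalone sketch of that predicate, using made-up register numbering rather than LLVM's enum values:

```cpp
// Standalone sketch (illustrative register numbering, not LLVM's enum values):
// an instruction can only drop from EVEX to VEX encoding if none of its
// register operands are the "extended" XMM16-31 / YMM16-31 registers, which a
// VEX prefix cannot address.
#include <cstdio>
#include <initializer_list>

enum Reg : unsigned {
  XMM0 = 0,  XMM15 = 15, XMM16 = 16, XMM31 = 31,
  YMM0 = 32, YMM15 = 47, YMM16 = 48, YMM31 = 63,
};

bool usesExtendedRegister(std::initializer_list<unsigned> Ops) {
  for (unsigned R : Ops)
    if ((R >= XMM16 && R <= XMM31) || (R >= YMM16 && R <= YMM31))
      return true;                   // needs EVEX, cannot compress to VEX
  return false;
}

int main() {
  std::printf("{XMM1, XMM2}:  %s\n",
              usesExtendedRegister({XMM0 + 1, XMM0 + 2}) ? "EVEX only" : "VEX ok");
  std::printf("{XMM1, XMM17}: %s\n",
              usesExtendedRegister({XMM0 + 1, XMM16 + 1}) ? "EVEX only" : "VEX ok");
}
```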
X86TargetTransformInfo.h | X86-specific TargetTransformInfo, using the target's detailed information to answer TTI queries; it enumerates subtarget feature bits such as X86::Feature64Bit, FeatureNOPL, FeatureCMPXCHG16B, FeatureLAHFSAHF, and the fast-NOP tuning features.
X86FastISel.cpp | The X86-specific support for the FastISel class; e.g. foldX86XALUIntrinsic maps the *.with.overflow intrinsics to condition codes (umul_with_overflow to X86::COND_O, usub_with_overflow to X86::COND_B), and X86FastEmitLoad picks load opcodes.
X86CondBrFolding.cpp | The "X86 CondBr Folding" pass; findPath follows predecessor blocks whose branch condition is X86::COND_L, COND_G, or COND_E and compares the current and predecessor compare immediates to decide whether the branch outcome is already known along that path.
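The ValueCmpTrue expression quoted in the hits decides this purely from the two immediate compare values and the predecessor's condition code. A standalone sketch of that predicate; the condition-code enum here is an illustrative subset, not X86::CondCode:

```cpp
// Standalone sketch mirroring the boolean expression in findPath(): given that
// the predecessor compared against PredCmpValue and branched with condition CC,
// report whether a value satisfying that condition relates to CmpValue in the
// way the expression encodes.
#include <cstdio>

enum CondCode { COND_L, COND_G, COND_E };   // illustrative subset

bool valueCmpTrue(long CmpValue, long PredCmpValue, CondCode CC) {
  return (CmpValue <  PredCmpValue && CC == COND_L) ||
         (CmpValue >  PredCmpValue && CC == COND_G) ||
         (CmpValue == PredCmpValue && CC == COND_E);
}

int main() {
  // With a predecessor condition of COND_L against 10, a compare against 5
  // satisfies the expression, a compare against 12 does not.
  std::printf("%d %d\n", valueCmpTrue(5, 10, COND_L), valueCmpTrue(12, 10, COND_L));
}
```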
X86FrameLowering.cpp | The X86 implementation of the TargetFrameLowering class; helpers such as getSUBriOpcode and getADDriOpcode choose X86::SUB64ri8/SUB64ri32 or SUB32ri8/SUB32ri (and the ADD counterparts) for stack-pointer adjustments based on pointer width and immediate size.
X86WinAllocaExpander.cpp | The "X86 WinAlloca Expander" pass; getWinAllocaAmount reads the allocation size of an X86::WIN_ALLOCA_32/WIN_ALLOCA_64 pseudo from its defining MOV32ri/MOV64ri, and isPushPop recognizes PUSH32i8, PUSH32r, PUSH32rmm, and related opcodes.
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/X86/MCTargetDesc/ |
X86MCTargetDesc.cpp | Provides the X86-specific target descriptions for MC; hasLockPrefix tests the X86::IP_HAS_LOCK instruction flag, and initLLVMToSEHAndCVRegMapping maps CodeView register ids (AL, CL, DL, BL, AH, ...) to the corresponding X86 registers.
X86InstPrinterCommon.cpp | Assembly-printing helpers shared by the instruction printers; e.g. printVPCOMMnemonic appends the element-size suffix (b, d, q, ub, ...) chosen from the VPCOM* opcode.
X86ATTInstPrinter.cpp | The AT&T-syntax instruction printer; printInst special-cases X86::CALLpcrel32 when the Mode64Bit feature is set and X86::DATA16_PREFIX in 16-bit mode, and printVecCompareInstr handles the CMPPD/CMPPS/CMPSD comparison pseudo-mnemonics.
X86IntelInstPrinter.cpp | The Intel-syntax instruction printer, with the same X86::DATA16_PREFIX handling in 16-bit mode and the same vector-compare (CMPPD/CMPPS/CMPSD) pseudo-mnemonic printing in printVecCompareInstr.
X86BaseInfo.h | Top-level definitions for the X86 target shared by the compiler back end and the MC libraries: the llvm::X86 namespace, the X86-specific condition codes (corresponding to X86_*_COND), and classifyFirstOpcodeInMacroFusion, which groups TEST/CMP-style opcodes (TEST16rr, TEST32i32, ...) for macro-fusion.
X86AsmBackend.cpp | The X86 assembler backend; a small wrapper accumulates the requested branch-alignment kinds (X86::AlignBranchFused, AlignBranchJcc, AlignBranchJmp, AlignBranchCall, AlignBranchRet, AlignBranchIndirect) into a mask of X86::AlignBranchBoundaryKind values via addKind.
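The wrapper above is essentially option parsing into a bitmask. A standalone sketch of that accumulation, with made-up flag values and token spellings (the real option syntax and kind names may differ):

```cpp
// Standalone sketch (illustrative flag values and token spellings): parse a
// comma-separated list such as "fused,jcc,jmp" into a bitmask of branch kinds
// whose boundaries the assembler should align, mirroring the addKind()
// accumulation seen in the snippet.
#include <cstdint>
#include <cstdio>
#include <sstream>
#include <string>

enum AlignBranchKind : uint8_t {
  AlignBranchFused    = 1 << 0,
  AlignBranchJcc      = 1 << 1,
  AlignBranchJmp      = 1 << 2,
  AlignBranchCall     = 1 << 3,
  AlignBranchRet      = 1 << 4,
  AlignBranchIndirect = 1 << 5,
};

uint8_t parseAlignBranchKinds(const std::string &Spec) {
  uint8_t Mask = 0;
  std::stringstream SS(Spec);
  std::string Tok;
  while (std::getline(SS, Tok, ',')) {
    if (Tok == "fused")         Mask |= AlignBranchFused;
    else if (Tok == "jcc")      Mask |= AlignBranchJcc;
    else if (Tok == "jmp")      Mask |= AlignBranchJmp;
    else if (Tok == "call")     Mask |= AlignBranchCall;
    else if (Tok == "ret")      Mask |= AlignBranchRet;
    else if (Tok == "indirect") Mask |= AlignBranchIndirect;
    else std::fprintf(stderr, "unknown branch kind: %s\n", Tok.c_str());
  }
  return Mask;
}

int main() {
  std::printf("mask = 0x%x\n", unsigned(parseAlignBranchKinds("fused,jcc,jmp")));
}
```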
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Support/ |
Host.cpp | Host CPU detection; getIntelProcessorTypeAndSubtype walks the CPUID family, model, and feature bits to classify the processor (X86::INTEL_i386, INTEL_i486, INTEL_PENTIUM or INTEL_PENTIUM_MMX when FEATURE_MMX is set, INTEL_PENTIUM_PRO, INTEL_PENTIUM_II/III/M, ...).
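The detection routine above is a family/model/feature decision tree keyed off CPUID. A heavily simplified standalone sketch of just the family-level split; the enum and the MMX feature bit position are illustrative, and the real code refines family 6 by model number and many more feature bits:

```cpp
// Heavily simplified standalone sketch of the family-level classification in
// getIntelProcessorTypeAndSubtype(); the real detection also inspects the
// model number and a much larger feature set.
#include <cstdio>

enum ProcessorType { INTEL_UNKNOWN, INTEL_i386, INTEL_i486,
                     INTEL_PENTIUM, INTEL_PENTIUM_MMX, INTEL_PENTIUM_PRO_FAMILY };

constexpr unsigned FEATURE_MMX = 1u << 0;   // illustrative bit position

ProcessorType classifyIntel(unsigned Family, unsigned Features) {
  switch (Family) {
  case 3: return INTEL_i386;
  case 4: return INTEL_i486;
  case 5: return (Features & FEATURE_MMX) ? INTEL_PENTIUM_MMX : INTEL_PENTIUM;
  case 6: return INTEL_PENTIUM_PRO_FAMILY;  // model further refines P6-class CPUs
  default: return INTEL_UNKNOWN;
  }
}

int main() {
  std::printf("family 5 + MMX -> %d\n", classifyIntel(5, FEATURE_MMX));
}
```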
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/X86/AsmParser/ |
X86AsmParser.cpp | Parses X86 assembly into MCInst instructions; MatchInstruction can retry after SwitchMode flips among the X86::Mode16Bit/Mode32Bit/Mode64Bit feature bits, and ParseConditionCode handles condition-code suffixes.