/third_party/skia/third_party/externals/spirv-tools/source/opt/
  iterator.h
    261: template <typename VT, bool IC>
    262: inline UptrVectorIterator<VT, IC>& UptrVectorIterator<VT, IC>::operator++() {
    267: template <typename VT, bool IC>
    268: inline UptrVectorIterator<VT, IC> UptrVectorIterator<VT, IC>::operator++(int) {
    274: template <typename VT, bool IC>
    275: inline UptrVectorIterator<VT, IC>& UptrVectorIterator<VT, IC>::operator--() {
    280: template <typename VT, bool IC>
    [further matches omitted]
/third_party/skia/third_party/externals/swiftshader/third_party/SPIRV-Tools/source/opt/
  iterator.h
    261-280: same UptrVectorIterator<VT, IC> operator++/operator-- matches as in the spirv-tools copy above (vendored duplicate)
    [further matches omitted]
/third_party/spirv-tools/source/opt/
  iterator.h
    261-280: same UptrVectorIterator<VT, IC> operator++/operator-- matches as in the first copy above (vendored duplicate)
    [further matches omitted]
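The IC template parameter in these hits is a boolean flag on SPIRV-Tools' UptrVectorIterator, whose increment and decrement operators are what the search matched. As a rough sketch of the underlying pattern, an iterator over a vector of unique_ptrs that yields the pointees directly, assuming the flag selects const-ness; this is illustrative only, not the library's actual code:

```cpp
#include <memory>
#include <type_traits>
#include <vector>

// Iterator over std::vector<std::unique_ptr<VT>> that dereferences straight
// to the pointee, so callers see VT& rather than unique_ptr<VT>&.
// IsConst picks the const flavor (presumably what IC stands for above).
template <typename VT, bool IsConst>
class UptrVectorIter {
  using Uptr = std::unique_ptr<VT>;
  using Under =
      typename std::conditional<IsConst,
                                typename std::vector<Uptr>::const_iterator,
                                typename std::vector<Uptr>::iterator>::type;
  using Ref = typename std::conditional<IsConst, const VT&, VT&>::type;

 public:
  explicit UptrVectorIter(Under it) : it_(it) {}

  Ref operator*() const { return **it_; }  // unwrap the unique_ptr

  UptrVectorIter& operator++() { ++it_; return *this; }                          // pre-increment
  UptrVectorIter operator++(int) { UptrVectorIter t = *this; ++it_; return t; }  // post-increment
  UptrVectorIter& operator--() { --it_; return *this; }

  bool operator==(const UptrVectorIter& o) const { return it_ == o.it_; }
  bool operator!=(const UptrVectorIter& o) const { return it_ != o.it_; }

 private:
  Under it_;
};

// Usage: given std::vector<std::unique_ptr<int>> vec,
//   UptrVectorIter<int, false> it(vec.begin());  // *it yields int&
```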
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Transforms/InstCombine/
  InstCombineLoadStoreAlloca.cpp
    182: static Instruction *simplifyAllocaArraySize(InstCombiner &IC, AllocaInst &AI) {
    190: Value *V = IC.Builder.getInt32(1);  (in simplifyAllocaArraySize)
    199: AllocaInst *New = IC.Builder.CreateAlloca(NewTy, nullptr, AI.getName());  (in simplifyAllocaArraySize)
    212: Type *IdxTy = IC.getDataLayout().getIntPtrType(AI.getType());  (in simplifyAllocaArraySize)
    217: IC.InsertNewInstBefore(GEP, *It);  (in simplifyAllocaArraySize)
    221: return IC.replaceInstUsesWith(AI, GEP);  (in simplifyAllocaArraySize)
    226: return IC.replaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));  (in simplifyAllocaArraySize)
    230: Type *IntPtrTy = IC.getDataLayout().getIntPtrType(AI.getType());  (in simplifyAllocaArraySize)
    232: Value *V = IC.Builder.CreateIntCast(AI.getArraySize(), IntPtrTy, false);  (in simplifyAllocaArraySize)
    253: PointerReplacer(InstCombiner &IC)
    263: InstCombiner &IC;  (member of __anon25051::PointerReplacer)
    482: combineStoreToNewValue(InstCombiner &IC, StoreInst &SI, Value *V)
    574: combineLoadToOperationType(InstCombiner &IC, LoadInst &LI)
    644: unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI)
    847: canReplaceGEPIdxWithZero(InstCombiner &IC, GetElementPtrInst *GEPI, Instruction *MemI, unsigned &Idx)
    917: replaceGEPIdxWithZero(InstCombiner &IC, Value *Ptr, T &MemI)
    1086: likeBitCastFromVector(InstCombiner &IC, Value *V)
    1147: combineStoreToValueType(InstCombiner &IC, StoreInst &SI)
    1179: unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI)
    1323: removeBitcastsFromLoadStoreOnMinMax(InstCombiner &IC, StoreInst &SI)
    [further matches omitted]
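In these InstCombine hits, IC is the InstCombiner pass object passed by reference so that file-local helpers can reach its IRBuilder, the DataLayout, and replaceInstUsesWith. A minimal sketch of that context-threading shape, using hypothetical Rewriter/Builder/Node types rather than the real LLVM classes:

```cpp
#include <memory>
#include <string>
#include <vector>

// Hypothetical stand-ins for Instruction / IRBuilder / DataLayout.
struct Node { std::string text; };

struct Builder {
  std::vector<std::unique_ptr<Node>> created;
  Node* createInt(int v) {
    created.push_back(std::make_unique<Node>(Node{"i32 " + std::to_string(v)}));
    return created.back().get();
  }
};

struct DataLayout { unsigned pointerBits = 64; };

// The "IC"-style context: owns the builder and layout, and provides the
// replace helper that every transformation funnels through.
class Rewriter {
 public:
  Builder builder;
  const DataLayout& dataLayout() const { return layout_; }
  Node* replaceUsesWith(Node& /*old*/, Node* replacement) {
    // A real combiner would rewire use lists here; the sketch just forwards.
    return replacement;
  }

 private:
  DataLayout layout_;
};

// Free helper in the style of simplifyAllocaArraySize(IC, AI): it takes the
// context by reference instead of being a member function of the pass.
static Node* simplifySomething(Rewriter& IC, Node& inst) {
  Node* one = IC.builder.createInt(1);
  return IC.replaceUsesWith(inst, one);
}
```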
  InstCombineCasts.cpp
    342: static bool canEvaluateTruncated(Value *V, Type *Ty, InstCombiner &IC,
    359: return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&  (in canEvaluateTruncated)
    360: canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);  (in canEvaluateTruncated)
    369: if (IC.MaskedValueIsZero(I->getOperand(0), Mask, 0, CxtI) &&  (in canEvaluateTruncated)
    370: IC.MaskedValueIsZero(I->getOperand(1), Mask, 0, CxtI)) {  (in canEvaluateTruncated)
    371: return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&  (in canEvaluateTruncated)
    372: canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);  (in canEvaluateTruncated)
    383: return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI);  (in canEvaluateTruncated)
    396: IC.MaskedValueIsZero(I->getOperand(0),  (in canEvaluateTruncated)
    398: return canEvaluateTruncated(I->getOperand(0), Ty, IC, Cxt...  (in canEvaluateTruncated)
    457: foldVecTruncToExtElt(TruncInst &Trunc, InstCombiner &IC)
    983: canEvaluateZExtd(Value *V, Type *Ty, unsigned &BitsToClear, InstCombiner &IC, Instruction *CxtI)
    1864: optimizeVectorResizeWithIntegerBitCasts(Value *InVal, VectorType *DestTy, InstCombiner &IC)
    2066: optimizeIntegerToVectorInsertions(BitCastInst &CI, InstCombiner &IC)
    2095: canonicalizeBitCastExtElt(BitCastInst &BitCast, InstCombiner &IC)
    [further matches omitted]
  InstCombineShifts.cpp
    424: Instruction *InnerShift, InstCombiner &IC,  (in canEvaluateShiftedShift)
    458: if (IC.MaskedValueIsZero(InnerShift->getOperand(0), Mask, 0, CxtI))  (in canEvaluateShiftedShift)
    476: InstCombiner &IC, Instruction *CxtI) {  (in canEvaluateShifted)
    519: return canEvaluateShifted(I->getOperand(0), NumBits, IsLeftShift, IC, I) &&  (in canEvaluateShifted)
    520: canEvaluateShifted(I->getOperand(1), NumBits, IsLeftShift, IC, I);  (in canEvaluateShifted)
    524: return canEvaluateShiftedShift(NumBits, IsLeftShift, I, IC, CxtI);  (in canEvaluateShifted)
    530: return canEvaluateShifted(TrueVal, NumBits, IsLeftShift, IC, SI) &&  (in canEvaluateShifted)
    531: canEvaluateShifted(FalseVal, NumBits, IsLeftShift, IC, SI);  (in canEvaluateShifted)
    539: if (!canEvaluateShifted(IncValue, NumBits, IsLeftShift, IC, PN))  (in canEvaluateShifted)
    612: InstCombiner &IC, cons...  (in getShiftedValue)
    423: canEvaluateShiftedShift(unsigned OuterShAmt, bool IsOuterShl, Instruction *InnerShift, InstCombiner &IC, Instruction *CxtI)
    475: canEvaluateShifted(Value *V, unsigned NumBits, bool IsLeftShift, InstCombiner &IC, Instruction *CxtI)
    611: getShiftedValue(Value *V, unsigned NumBits, bool isLeftShift, InstCombiner &IC, const DataLayout &DL)
    [further matches omitted]
  InstCombineCalls.cpp
    1163: InstCombiner &IC) {  (in simplifyInvariantGroupIntrinsic)
    1173: Result = IC.Builder.CreateLaunderInvariantGroup(StrippedInvariantGroupsArg);  (in simplifyInvariantGroupIntrinsic)
    1175: Result = IC.Builder.CreateStripInvariantGroup(StrippedInvariantGroupsArg);  (in simplifyInvariantGroupIntrinsic)
    1181: Result = IC.Builder.CreateAddrSpaceCast(Result, II.getType());  (in simplifyInvariantGroupIntrinsic)
    1183: Result = IC.Builder.CreateBitCast(Result, II.getType());  (in simplifyInvariantGroupIntrinsic)
    1188: static Instruction *foldCttzCtlz(IntrinsicInst &II, InstCombiner &IC) {
    1220: KnownBits Known = IC.computeKnownBits(Op0, 0, &II);  (in foldCttzCtlz)
    1234: return IC.replaceInstUsesWith(II, C);  (in foldCttzCtlz)
    1241: isKnownNonZero(Op0, IC.getDataLayout(), 0, &IC...  (in foldCttzCtlz)
    1162: simplifyInvariantGroupIntrinsic(IntrinsicInst &II, InstCombiner &IC)
    1264: foldCtpop(IntrinsicInst &II, InstCombiner &IC)
    1304: simplifyX86MaskedLoad(IntrinsicInst &II, InstCombiner &IC)
    1340: simplifyX86MaskedStore(IntrinsicInst &II, InstCombiner &IC)
    1485: removeTriviallyEmptyRange(IntrinsicInst &I, unsigned StartID, unsigned EndID, InstCombiner &IC)
    1508: SimplifyNVVMIntrinsic(IntrinsicInst *II, InstCombiner &IC)
    [further matches omitted]
  InstCombineMulDivRem.cpp
    49: static Value *simplifyValueKnownNonZero(Value *V, InstCombiner &IC,
    63: A = IC.Builder.CreateSub(A, B);  (in simplifyValueKnownNonZero)
    64: return IC.Builder.CreateShl(One, A);  (in simplifyValueKnownNonZero)
    71: IC.isKnownToBeAPowerOfTwo(I->getOperand(0), false, 0, &CxtI)) {  (in simplifyValueKnownNonZero)
    74: if (Value *V2 = simplifyValueKnownNonZero(I->getOperand(0), IC, CxtI)) {  (in simplifyValueKnownNonZero)
    818: InstCombiner &IC);
    847: const BinaryOperator &I, InstCombiner &IC) {  (in foldUDivPow2Cst)
    860: InstCombiner &IC) {  (in foldUDivShl)
    872: N = IC.Builder.CreateAdd(N, Log2Base);  (in foldUDivShl)
    874: N = IC...  (in foldUDivShl)
    846: foldUDivPow2Cst(Value *Op0, Value *Op1, const BinaryOperator &I, InstCombiner &IC)
    859: foldUDivShl(Value *Op0, Value *Op1, const BinaryOperator &I, InstCombiner &IC)
    [further matches omitted]
  InstCombineSelect.cpp
    539: static Value *foldSelectICmpLshrAshr(const ICmpInst *IC, Value *TrueVal,
    542: ICmpInst::Predicate Pred = IC->getPredicate();  (in foldSelectICmpLshrAshr)
    543: Value *CmpLHS = IC->getOperand(0);  (in foldSelectICmpLshrAshr)
    544: Value *CmpRHS = IC->getOperand(1);  (in foldSelectICmpLshrAshr)
    568: return Builder.CreateAShr(X, Y, IC->getName(), IsExact);  (in foldSelectICmpLshrAshr)
    587: static Value *foldSelectICmpAndOr(const ICmpInst *IC, Value *TrueVal,
    593: TrueVal->getType()->isVectorTy() != IC->getType()->isVectorTy())  (in foldSelectICmpAndOr)
    596: Value *CmpLHS = IC->getOperand(0);  (in foldSelectICmpAndOr)
    597: Value *CmpRHS = IC->getOperand(1);  (in foldSelectICmpAndOr)
    603: if (IC...  (in foldSelectICmpAndOr)
    [further matches omitted]
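foldSelectICmpLshrAshr above appears to replace a select between shift flavors with a single CreateAShr; the identity such a fold relies on is simply that logical and arithmetic right shift agree for non-negative inputs. A small standalone check of that identity (plain C++, not LLVM IR):

```cpp
#include <cassert>
#include <cstdint>

// For x >= 0, logical and arithmetic right shift produce the same bits,
// so a select of the form (x >= 0 ? x >>u y : ...) can favor the ashr form.
int32_t logicalShr(int32_t x, unsigned y) {
  return static_cast<int32_t>(static_cast<uint32_t>(x) >> y);
}
int32_t arithShr(int32_t x, unsigned y) { return x >> y; }

int main() {
  for (int32_t x : {0, 1, 5, 12345, INT32_MAX}) {
    for (unsigned y : {0u, 1u, 7u, 31u}) {
      assert(logicalShr(x, y) == arithShr(x, y));  // holds whenever x >= 0
    }
  }
  return 0;
}
```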
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Transforms/IPO/
  Inliner.cpp
    302: /// inlined which is proved to be more beneficial. \p IC is the
    307: shouldBeDeferred(Function *Caller, CallSite CS, InlineCost IC,
    316: if (IC.getCost() <= 0)  (in shouldBeDeferred)
    336: int CandidateCost = IC.getCost() - 1;  (in shouldBeDeferred)
    346: if (!ApplyLastCallBonus && TotalSecondaryCost >= IC.getCost())  (in shouldBeDeferred)
    382: if (inliningPreventsSomeOuterInline && TotalSecondaryCost < IC.getCost())  (in shouldBeDeferred)
    394: RemarkT &operator<<(RemarkT &&R, const InlineCost &IC) {
    396: if (IC.isAlways()) {  (in operator<<)
    398: } else if (IC.isNever()) {  (in operator<<)
    401: R << "(cost=" << ore::NV("Cost", IC...  (in operator<<)
    409: inlineCostStr(const InlineCost &IC)
    411: Remark << IC;  (in inlineCostStr)
    507: emit_inlined_into(OptimizationRemarkEmitter &ORE, DebugLoc &DLoc, const BasicBlock *Block, const Function &Callee, const Function &Caller, const InlineCost &IC)
    [further matches omitted]
  PartialInlining.cpp
    780: InlineCost IC = getInlineCost(cast<CallBase>(*Call), getInlineParams(),  (in shouldPartialInline)
    784: if (IC.isAlways()) {  (in shouldPartialInline)
    793: if (IC.isNever()) {  (in shouldPartialInline)
    803: if (!IC) {  (in shouldPartialInline)
    808: << NV("Cost", IC.getCost()) << ", threshold="  (in shouldPartialInline)
    809: << NV("Threshold", IC.getCostDelta() + IC.getCost()) << ")";  (in shouldPartialInline)
    839: << NV("Caller", Caller) << " with cost=" << NV("Cost", IC.getCost())  (in shouldPartialInline)
    841: << NV("Threshold", IC.getCostDelta() + IC...  (in shouldPartialInline)
    [further matches omitted]
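In Inliner.cpp and PartialInlining.cpp, IC is an InlineCost: either an unconditional always/never answer or a numeric cost weighed against a threshold (note the cost and getCostDelta()-based threshold printed in the remarks above). A hedged sketch of that three-way shape and the decision it drives; the class and method names here are illustrative, not LLVM's API:

```cpp
// Illustrative three-state cost: Always, Never, or a number checked against a
// threshold, mirroring how the hits use IC.isAlways(), IC.isNever(),
// IC.getCost() and a cost/threshold comparison.
class InlineDecisionCost {
 public:
  static InlineDecisionCost always() { return {State::Always, 0, 0}; }
  static InlineDecisionCost never() { return {State::Never, 0, 0}; }
  static InlineDecisionCost variable(int cost, int threshold) {
    return {State::Variable, cost, threshold};
  }

  bool isAlways() const { return state_ == State::Always; }
  bool isNever() const { return state_ == State::Never; }
  int getCost() const { return cost_; }
  int getThreshold() const { return threshold_; }

  // Mirrors the "if (!IC)" style check: a variable cost only converts to
  // true while it stays under its threshold.
  explicit operator bool() const {
    if (state_ == State::Always) return true;
    if (state_ == State::Never) return false;
    return cost_ < threshold_;
  }

 private:
  enum class State { Always, Never, Variable };
  InlineDecisionCost(State s, int c, int t) : state_(s), cost_(c), threshold_(t) {}
  State state_;
  int cost_, threshold_;
};

bool shouldInline(const InlineDecisionCost& IC) {
  if (IC.isAlways()) return true;
  if (IC.isNever()) return false;
  return static_cast<bool>(IC);  // cost vs. threshold
}
```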
/third_party/node/deps/v8/src/ic/
  ic.h
    25: // IC is the base class for LoadIC, StoreIC, KeyedLoadIC, and KeyedStoreIC.
    27: class IC {
    29: // Alias the inline cache state type to make the IC code more readable.
    32: // Construct the IC structure with the given number of extra
    34: IC(Isolate* isolate, Handle<FeedbackVector> vector, FeedbackSlot slot,
    36: virtual ~IC() = default;
    40: // Compute the current IC state based on the target stub, lookup_start_object
    66: // Nofity the IC system that a feedback has changed.
    85: bool ConfigureVectorState(IC::State new_state, Handle<Object> key);
    97: char TransitionMarkFromState(IC...
    [further matches omitted]
  ic-inl.h
    19: void IC::update_lookup_start_object_map(Handle<Object> object) {
    28: bool IC::IsHandler(MaybeObject object) {
    37: bool IC::vector_needs_update() {
  ic.cc
    66: char IC::TransitionMarkFromState(IC::State state) {
    111: void IC::TraceIC(const char* type, Handle<Object> name) {
    118: void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
    173: // Reserve enough space for IC transition state, the longest length is 17.  (in TraceIC)
    193: IC::IC(Isolate* isolate, Handle<FeedbackVector> vector, FeedbackSlot slot,
    241: bool IC::ShouldRecomputeHandler(Handle<String> name) {
    269: bool IC::RecomputeHandlerForName(Handle<Object> name) {
    280: void IC...
    [further matches omitted]
  stub-cache.cc
    20: DCHECK(!IC::IsHandler(MaybeObject()));  (in StubCache)
    76: if (handler->ptr() != kNullAddress) DCHECK(IC::IsHandler(handler));  (in CommonStubCacheChecks)
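The V8 hits are its inline-cache (IC) machinery: each call or property site carries feedback whose state is tracked and transitioned (the TransitionMarkFromState and TraceIC matches) as execution observes new receiver shapes. The sketch below only illustrates that state-transition idea, monomorphic to polymorphic to megamorphic, and is not V8's implementation:

```cpp
#include <cstddef>
#include <vector>

// Conceptual inline-cache feedback for one call/property site (an assumed
// simplification; real ICs cache handlers keyed by hidden-class maps).
class FeedbackSite {
 public:
  enum class State { Uninitialized, Monomorphic, Polymorphic, Megamorphic };
  static constexpr std::size_t kMaxPolymorphic = 4;

  State state() const { return state_; }

  // Record the "map" (hidden-class id) observed at this site and update state.
  void observe(int mapId) {
    if (state_ == State::Megamorphic) return;  // already gave up caching
    for (int seen : maps_)
      if (seen == mapId) return;               // cache hit, no transition
    maps_.push_back(mapId);
    if (maps_.size() == 1) {
      state_ = State::Monomorphic;
    } else if (maps_.size() <= kMaxPolymorphic) {
      state_ = State::Polymorphic;
    } else {
      maps_.clear();                           // too many shapes seen
      state_ = State::Megamorphic;
    }
  }

 private:
  State state_ = State::Uninitialized;
  std::vector<int> maps_;
};
```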
/third_party/ffmpeg/libavutil/
  cast5.c
    32: #define IC(x) (((x) >> 8) & 0xff)
    40: f = ((S1[IA(I)] + S2[IB(I)]) ^ S3[IC(I)]) - S4[ID(I)]; \
    47: f = ((S1[IA(I)] - S2[IB(I)]) + S3[IC(I)]) ^ S4[ID(I)]; \
    54: f = ((S1[IA(I)] ^ S2[IB(I)]) - S3[IC(I)]) + S4[ID(I)]; \
    60: z[0] = x[0] ^ S5[IB(x[3])] ^ S6[ID(x[3])] ^ S7[IA(x[3])] ^ S8[IC(x[3])] ^ S7[IA(x[2])]; \
    61: z[1] = x[2] ^ S5[IA(z[0])] ^ S6[IC(z[0])] ^ S7[IB(z[0])] ^ S8[ID(z[0])] ^ S8[IC(x[2])]; \
    62: z[2] = x[3] ^ S5[ID(z[1])] ^ S6[IC(z[1])] ^ S7[IB(z[1])] ^ S8[IA(z[1])] ^ S5[IB(x[2])]; \
    63: z[3] = x[1] ^ S5[IC(z[2])] ^ S6[IB(z[2])] ^ S7[ID(z[2])] ^ S8[IA(z[2])] ^ S6[ID(x[2])]; \
    68: x[0] = z[2] ^ S5[IB(z[1])] ^ S6[ID(z[1])] ^ S7[IA(z[1])] ^ S8[IC(...
    [further matches omitted]
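In cast5.c the IC(x) macro picks out bits 15..8 of a 32-bit word to index an S-box; IA, IB and ID presumably select the other three bytes, though only IC's definition is shown here. A quick standalone check of what that shift-and-mask does:

```cpp
#include <cassert>
#include <cstdint>

// Same shift-and-mask as the IC(x) macro: byte at bits 15..8 of a 32-bit word.
static inline uint8_t byte1(uint32_t x) { return (x >> 8) & 0xff; }

int main() {
  uint32_t word = 0x11223344u;
  assert(byte1(word) == 0x33);  // 0x11 22 [33] 44 -> the second-lowest byte
  return 0;
}
```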
/third_party/typescript/tests/baselines/reference/
  genericTypeWithMultipleBases3.js
    14: interface IC<T> extends IA<T>, IB<T> { }
    16: var c: IC<number>;
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AMDGPU/
  AMDGPUInline.cpp
    217: auto IC = llvm::getInlineCost(cast<CallBase>(*CS.getInstruction()), Callee,  (in getInlineCost)
    221: if (IC && !IC.isAlways() && !Callee->hasFnAttribute(Attribute::InlineHint)) {  (in getInlineCost)
    227: return IC;  (in getInlineCost)
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Transforms/Scalar/
  LoopSink.cpp
    230: Instruction *IC = I.clone();  (in sinkInstruction)
    231: IC->setName(I.getName());  (in sinkInstruction)
    232: IC->insertBefore(&*N->getFirstInsertionPt());  (in sinkInstruction)
    233: // Replaces uses of I with IC in N  (in sinkInstruction)
    234: I.replaceUsesWithIf(IC, [N](Use &U) {  (in sinkInstruction)
    237: // Replaces uses of I with IC in blocks dominated by N  (in sinkInstruction)
    238: replaceDominatedUsesWith(&I, IC, DT, N);  (in sinkInstruction)
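The LoopSink hit clones the instruction, reuses its name, inserts the clone at the target block's first insertion point, and then redirects the relevant uses to the clone. A sketch of that clone-and-redirect pattern over a toy node type (hypothetical types, not LLVM's):

```cpp
#include <memory>
#include <string>
#include <vector>

// Toy node: "operands" are the uses this node has of other nodes.
struct Node {
  std::string name;
  std::vector<Node*> operands;
};

// Clone `original`, keep its name, and point the listed users' operands at the
// clone, mirroring clone() + setName() + replaceUsesWithIf() in the hit above.
Node* cloneAndRedirect(Node& original,
                       const std::vector<Node*>& usersToRedirect,
                       std::vector<std::unique_ptr<Node>>& owner) {
  owner.push_back(std::make_unique<Node>(original));  // Instruction *IC = I.clone();
  Node* clone = owner.back().get();
  clone->name = original.name;                        // IC->setName(I.getName());
  for (Node* user : usersToRedirect)
    for (Node*& op : user->operands)
      if (op == &original) op = clone;                // redirect this use to the clone
  return clone;
}
```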
  GuardWidening.cpp
    595: auto *IC = dyn_cast<ICmpInst>(CheckCond);
    596: if (!IC || !IC->getOperand(0)->getType()->isIntegerTy() ||
    597: (IC->getPredicate() != ICmpInst::ICMP_ULT &&
    598: IC->getPredicate() != ICmpInst::ICMP_UGT))
    601: const Value *CmpLHS = IC->getOperand(0), *CmpRHS = IC->getOperand(1);
    602: if (IC->getPredicate() == ICmpInst::ICMP_UGT)
    605: auto &DL = IC->getModule()->getDataLayout();
    609: CmpRHS, IC);
    [further matches omitted]
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/X86/AsmParser/
  X86AsmParser.cpp
    349: InfixCalculator IC;  (member of __anon24885::X86AsmParser::IntelExprStateMachine)
    382: int64_t getImm() { return Imm + IC.execute(); }
    399: IC.pushOperator(IC_OR);  (in onOr)
    414: IC.pushOperator(IC_XOR);  (in onXor)
    429: IC.pushOperator(IC_AND);  (in onAnd)
    444: IC.pushOperator(IC_LSHIFT);  (in onLShift)
    459: IC.pushOperator(IC_RSHIFT);  (in onRShift)
    475: IC.pushOperator(IC_PLUS);  (in onPlus)
    524: IC.pushOperator(IC_MINUS);  (in onMinus)
    530: IC...  (in onMinus)
    [further matches omitted]
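Here IC is the InfixCalculator member of the Intel-syntax expression state machine: the onOr/onPlus/onMinus handlers push operators into it and getImm() later calls IC.execute() to collapse the expression. Below is a minimal two-stack evaluator in the same push-operator/push-operand/execute style, with made-up names and deliberately simplified precedence rather than the parser's real rules:

```cpp
#include <cstdint>
#include <stack>

// Tiny infix evaluator: operators are reduced eagerly whenever the incoming
// one does not bind tighter than the one on top of the stack. Assumes
// well-formed input (operands and operators pushed in expression order).
class MiniInfixCalc {
 public:
  void pushOperand(int64_t v) { operands_.push(v); }

  void pushOperator(char op) {
    while (!operators_.empty() && precedence(operators_.top()) >= precedence(op))
      reduceTop();
    operators_.push(op);
  }

  int64_t execute() {
    while (!operators_.empty()) reduceTop();
    return operands_.empty() ? 0 : operands_.top();
  }

 private:
  static int precedence(char op) {
    switch (op) {
      case '*': case '/': return 2;
      case '+': case '-': return 1;
      default:            return 0;  // '|', '^', '&' kept at one level here
    }
  }

  void reduceTop() {
    char op = operators_.top(); operators_.pop();
    int64_t rhs = operands_.top(); operands_.pop();
    int64_t lhs = operands_.top(); operands_.pop();
    int64_t r = 0;
    switch (op) {
      case '+': r = lhs + rhs; break;
      case '-': r = lhs - rhs; break;
      case '*': r = lhs * rhs; break;
      case '/': r = rhs ? lhs / rhs : 0; break;
      case '|': r = lhs | rhs; break;
      case '^': r = lhs ^ rhs; break;
      case '&': r = lhs & rhs; break;
    }
    operands_.push(r);
  }

  std::stack<int64_t> operands_;
  std::stack<char> operators_;
};

// e.g. 2 + 3 * 4: pushOperand(2); pushOperator('+'); pushOperand(3);
// pushOperator('*'); pushOperand(4); execute() == 14.
```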
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Analysis/
  CaptureTracking.cpp
    79: bool IncludeI, OrderedBasicBlock *IC)  (in CapturesBefore)
    80: : OrderedBB(IC), BeforeHere(I), DT(DT),  (in CapturesBefore)
/third_party/mesa3d/src/gallium/drivers/nouveau/nv50/
  nv50_formats.c
    314: C4(A, R16G16B16A16_UNORM, RGBA16_UNORM, R, G, B, A, UNORM, R16_G16_B16_A16, IC),
    315: C4(A, R16G16B16A16_SNORM, RGBA16_SNORM, R, G, B, A, SNORM, R16_G16_B16_A16, IC),
    325: F2(A, R16G16_UNORM, RG16_UNORM, R, G, xx, xx, UNORM, R16_G16, IC),
    326: F2(A, R16G16_SNORM, RG16_SNORM, R, G, xx, xx, SNORM, R16_G16, IC),
    331: F1(A, R16_UNORM, R16_UNORM, R, xx, xx, xx, UNORM, R16, IC),
    332: F1(A, R16_SNORM, R16_SNORM, R, xx, xx, xx, SNORM, R16, IC),
    336: C4(A, R8G8B8A8_SNORM, RGBA8_SNORM, R, G, B, A, SNORM, A8B8G8R8, IC),
    344: F2(A, R8G8_SNORM, RG8_SNORM, R, G, xx, xx, SNORM, G8R8, IC),
    353: F1(A, R8_SNORM, R8_SNORM, R, xx, xx, xx, SNORM, R8, IC),
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Transforms/Vectorize/
  LoopVectorize.cpp
    5232: // this currently happens when OptForSize, in which case IC is set to 1 above.
    5233: unsigned IC = UINT_MAX;
    5260: IC = std::min(IC, TmpIC);
    5288: // Clamp the calculated IC to be between the 1 and the max interleave count
    5290: if (IC > MaxInterleaveCount)
    5291: IC = MaxInterleaveCount;
    5292: else if (IC < 1)
    5293: IC = 1;
    5299: return IC;
    [further matches omitted]
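The LoopVectorize matches show the chosen interleave count IC being minimized over candidates and then clamped into [1, MaxInterleaveCount]; assuming the maximum is at least 1, the two-branch clamp shown is equivalent to std::clamp, roughly:

```cpp
#include <algorithm>

// Equivalent of the clamping shown above: keep the interleave count within
// [1, maxInterleaveCount]. Illustrative only; assumes maxInterleaveCount >= 1,
// and the caller is expected to have replaced the UINT_MAX "unset" value via
// the min-over-candidates loop first.
unsigned clampInterleaveCount(unsigned ic, unsigned maxInterleaveCount) {
  return std::clamp(ic, 1u, maxInterleaveCount);
}
```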
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/X86/
  X86DomainReassignment.cpp
    474: InstrConverterBase *IC = Converters.lookup({i, MI->getOpcode()});  (in encloseInstr)
    475: if (!IC || !IC->isLegal(MI, TII))  (in encloseInstr)