using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"
/// Compute Result = In1+In2, returning true if the result overflowed for this
/// type.
static bool addWithOverflow(APInt &Result, const APInt &In1, const APInt &In2,
                            bool IsSigned = false) {
  bool Overflow;
  if (IsSigned)
    Result = In1.sadd_ov(In2, Overflow);
  else
    Result = In1.uadd_ov(In2, Overflow);
  return Overflow;
}

/// Compute Result = In1-In2, returning true if the result overflowed for this
/// type.
static bool subWithOverflow(APInt &Result, const APInt &In1, const APInt &In2,
                            bool IsSigned = false) {
  bool Overflow;
  if (IsSigned)
    Result = In1.ssub_ov(In2, Overflow);
  else
    Result = In1.usub_ov(In2, Overflow);
  return Overflow;
}
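A minimal usage sketch of the overflow check these helpers wrap, assuming only LLVM's APInt API from llvm/ADT/APInt.h (the main() harness and sample values are illustrative, not part of InstCombine):

#include "llvm/ADT/APInt.h"
#include <cstdio>

using llvm::APInt;

int main() {
  bool Overflow;
  // 8-bit signed addition: 100 + 100 wraps, 50 + 20 does not.  This is the
  // same computation addWithOverflow performs with IsSigned = true.
  APInt A(8, 100), B(8, 100);
  APInt Sum = A.sadd_ov(B, Overflow);
  std::printf("100 + 100 as i8: sum = %llu, overflow = %d\n",
              (unsigned long long)Sum.getZExtValue(), (int)Overflow);

  APInt C(8, 50), D(8, 20);
  Sum = C.sadd_ov(D, Overflow);
  std::printf("50 + 20 as i8: sum = %llu, overflow = %d\n",
              (unsigned long long)Sum.getZExtValue(), (int)Overflow);
  return 0;
}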
/// Given an icmp instruction, return true if any use of this comparison is a
/// branch on sign bit comparison.
static bool hasBranchUse(ICmpInst &I) {
  for (auto *U : I.users())
    if (isa<BranchInst>(U))
      return true;
  return false;
}
// Sign-test recognition: a signed compare against 1 or -1 is relaxed
// (ICMP_SLT -> ICMP_SLE, ICMP_SGT -> ICMP_SGE) so it can be treated as a
// comparison against zero.
//
// Folding a compare of a load from a constant global array indexed by a
// variable GEP: the initializer (ConstantArray/ConstantDataArray) is scanned
// element by element, constant-folding the compare for each element
// (ConstantFoldCompareInstOperands) and recording
// First/Second{True,False}Element, {True,False}RangeEnd and a 64-bit
// MagicBitvector.  The compare is then rewritten as a single index compare, an
// or/and of two index compares, a range check on the (possibly offset) index,
// or, when the array element count fits the index width, a test against the
// magic bitvector.  Non-inbounds GEPs and oversized index types bail out.
// rewriteGEPAsOffset and its feasibility walk: starting from the compared
// value, a worklist explores only inbounds GEPs with at most one non-constant
// index and PHI nodes (giving up past 100 explored values or at a catchswitch
// block).  Insertion points are chosen per value kind (PHI, instruction,
// argument, constant), then every explored value gets an integer index of
// DL.getIndexTypeSizeInBits width: '.idx' PHIs for PHI nodes, '.add' additions
// for GEP offsets, wired together through NewInsts, and finally a '.ptr' GEP
// from Base carrying the original no-wrap flags.  The rewritten value for
// Start (NewInsts[Start]) is returned.
// Compares involving GEPs: icmp of a getelementptr against null or against
// another GEP.  With different base pointers, the fold checks whether all
// indices match and otherwise lowers both sides with EmitGEPOffset and
// compares the offsets (reconciling different index types); with the same
// base, it counts the differing operands (NumDifferences/DiffOperand) and,
// when exactly one index differs and the no-wrap flags allow it, compares just
// that index pair (NewICmp(NW, LHSV, RHSV)).
//
// Compares of an unescaped alloca: a CmpCaptureTracker records every icmp that
// uses the alloca (ICmps[ICmp] |= 1u << OperandNo); if the alloca is not
// otherwise captured, each recorded compare is replaced by a constant result.
//
// Compare of X with (X + C): with C asserted non-zero, the compare is
// rewritten against constants derived from -C, SMax - C, or SMax - (C - 1),
// depending on the predicate.
// Equality compares of a constant shifted by a variable amount: for
// icmp eq/ne (lshr/ashr AP2, A), AP1 and icmp eq/ne (shl AP2, A), AP1 the
// shift amount A is solved for directly (using logBase2, trailing-zero counts,
// and checks such as AP1 == AP2.ashr(Shift) or AP2.shl(Shift) == AP1); when no
// shift amount can match, the compare folds to a true/false constant (TorF).
//
// Recognition of the signed-add overflow idiom: the compare plus the adds
// feeding it are replaced by a call to llvm.sadd.with.overflow, but only for
// narrow widths of 7, 15, or 31 bits.
//
// Equality compares where operand 0 is a binary operator whose result equals
// one of its inputs (for example a no-wrap add/sub, checked through
// OverflowingBinaryOperator's nuw/nsw flags): the operation is peeled off and
// the compare is repeated against the underlying value
// (return new ICmpInst(Pred, A/B/X/Y, Cmp.getOperand(1))).
// Top of the icmp-with-constant handling: a compare of a phi whose incoming
// values are all constants is evaluated per incoming edge and rebuilt as a phi
// of the results (zip(Ops, Phi->blocks())); one-use sign-bit compares that
// feed a branch (hasBranchUse) are left alone; a dominating branch condition
// can refine the compare (handleDomCond).
//
// Trunc-with-constant folds: icmp (trunc X), C is widened to the source type
// when profitable (sign- or zero-extending C, or using the known bits of X),
// with special cases for C == 1 and for power-of-two constants that become a
// compare of the shift amount against the destination width or its log2.
//
// Compares of two truncs (or a trunc and an extend): the compare is rewritten
// in a common type, taking the truncs' no-wrap flags, the signedness of the
// compare, and whether the wider type is a desirable integer type into
// account, possibly swapping the predicate.
// Xor-with-constant folds: a sign-mask xor flips the signedness of the
// predicate, other constants are folded into the compare constant (C ^ XorC),
// and shapes such as XorC == ~C, XorC == C, or XorC == -C with C a power of
// two become simpler compares (e.g. against ~C); an xor of a shifted mask is
// turned into an add-based range check against a bound.
//
// And-with-shift folds: for icmp (and (shl/lshr/ashr X, C3), C2), C1 both
// constants are shifted back (NewAndCst/NewCmpCst), checking that no compared
// bits are shifted out (AnyCmpCstBitsShiftedOut), and the compare is redone on
// a new 'and'.  Further and-with-constant equality folds compare X against
// ~C2, -C2, or the negated mask constant (NegBOC), and a one-use vector
// inequality against zero becomes a trunc of the and's operand.
// Remaining and-with-constant folds: the 'and' is widened to match a zext'd
// operand (ZextC1/ZextC2), an (or X, 1) operand is simplified when enough uses
// disappear (UsesRemoved), an integer-bit test against the IEEE exponent mask
// (guarded by the NoImplicitFloat attribute) is recognized on bitcast
// floating-point values, and sign-bit patterns become compares against the
// minimum signed value (MinSignedC).
//
// Or-with-constant folds: special cases for a load through a GEP of a global
// variable, negated power-of-two constants, boolean operands selecting between
// two constants (collapsed to bitwise logic such as CreateAnd(TruncY, X)),
// (add + mask) operands (NewComperand = (C - Addend) & Msk), chains of or'ed
// subtractions compared against zero (rebuilt via CreateBinOp over the
// collected CmpValues), 'or disjoint' folded like an add, plain mask constants
// folded into the compare constant (C ^ MaskC), and sign-bit tests
// (TrueIfSigned).
// Mul-with-constant folds: equality against zero with a no-wrap multiply,
// divisibility checks (C.sdiv/srem with nsw, C.udiv/urem with an odd
// multiplier or nuw), the MIN_SIGNED / all-ones corner case, and relational
// predicates rewritten against adjusted bounds (NewC).
//
// icmp (shl nuw C2, Y), C: an unsigned compare becomes a compare of the shift
// amount Y against the log2 of the bound (CLog2); the signed case with C2 == 1
// compares against the bit width minus one.
//
// Shl-with-constant-amount folds: with nsw the compare constant is un-shifted
// arithmetically (C.ashr(ShiftAmt), or (C - 1).ashr(ShiftAmt) + 1 for slt);
// with nuw logically (C.lshr(ShiftAmt), or (C - 1).lshr(ShiftAmt) + 1 for
// ult); one-use equality goes through an lshr'd constant; sign-bit tests,
// power-of-two range checks for unsigned predicates, and a truncating rewrite
// that may flip a strict predicate (ashr + trunc of the constant) follow.
// Lshr/ashr-with-constant folds: an exact shift compared for equality against
// zero just compares X against the other operand.  Otherwise feasibility is
// checked via leading-zero counts (CmpLZ), the compare constant is re-scaled
// by shifting it back up (C.shl(ShAmtVal), with (C + 1).shl(ShAmtVal) - 1
// variants for signed bounds, verifying the round trip is lossless), the case
// where the result has too few significant bits
// (C.getNumSignBits() <= ShAmtVal) is handled, and for equality the fold
// either compares X against C << ShAmtVal directly or masks X first with an
// 'and' of the low bits.
// Srem-with-constant folds: the remainder's possible range is checked against
// the divisor (NormalizedC.uge(DivisorC->abs() - 1)) and suitable compares are
// rewritten as a masked test (sign mask | (divisor - 1)).
//
// Udiv of a constant numerator: icmp of (udiv C2, X) becomes a compare of X
// against C2->udiv(C + 1) or C2->udiv(C).
//
// Sdiv/udiv-with-constant folds: the quotient compare is converted into a
// range check on X.  Prod/ProdOV track whether C * C2 overflows,
// LoBound/HiBound and LoOverflow/HiOverflow describe the admissible range
// (built from RangeSize and its negation, with sign-dependent adjustments),
// and the compare is emitted against LoBound or HiBound, or folded to a
// constant when both ends overflow.
// Sub-with-constant folds: with matching nuw/nsw the compare is swapped and
// redone against the difference (SwappedPred, SubResult); equality against
// zero and mask patterns ((C2 & (C - 1)) == (C - 1)) are special-cased, and
// some forms become a swapped compare against ~C.
//
// Add of two boolean extensions: a four-entry truth table (std::bitset<4>,
// ComputeTable over Op0Val/Op1Val adding +1/-1 per zext/sext) decides whether
// the compare folds to a constant (FoldConstant) or to logic over the two i1
// inputs.
//
// Add-with-constant folds: with nsw/nuw the constant is folded into the
// compare (C.ssub_ov/usub_ov), signed/unsigned boundary cases use the
// sign-mask and minimum values of the computed range (Lower/Upper), and
// one-use adds also fold via a doubled (C * 2) or complemented (~C) constant.
//
// When the operand's range fits a narrower legal type (shouldChangeType,
// CR.getActiveBits() <= NewCmpBW), the compare is rewritten in that type
// against EquivInt.  A compare of a select between constants evaluates the
// predicate for the less-than / equal / greater-than arms (C1LessThan,
// C2Equal, C3GreaterThan) and simplifies to logic on the underlying three-way
// compare, flipping strictness where needed.
// Bitcast compares: a compare of a bitcast looks through int<->FP round trips
// (sitofp/uitofp), turns an equality test of IEEE-like float bits into a
// simpler test (guarded by NoImplicitFloat), uses the inverted source for an
// all-ones constant, and for a vector-to-integer bitcast with a splat constant
// compares a single extracted element instead.
//
// Dispatch for icmp (binop|select|trunc|intrinsic ...), C, plus a shortcut
// that compares the operands of ssub/usub.with.overflow directly when only the
// extracted value is compared against zero.
//
// Equality folds of a binop against a constant: srem, add (folding the
// constant, or comparing against a negated operand when C is zero), xor
// (folding into the constant, or comparing the operands directly when C is
// zero), or, and udiv/sdiv (re-multiplying the constant, or flipping to a
// compare of the operands when C is zero for udiv).
3735 "Non-ctpop intrin in ctpop fold");
3771 Type *Ty =
II->getType();
3775 switch (
II->getIntrinsicID()) {
3776 case Intrinsic::abs:
3779 if (
C.isZero() ||
C.isMinSignedValue())
3780 return new ICmpInst(Pred,
II->getArgOperand(0), ConstantInt::get(Ty,
C));
3783 case Intrinsic::bswap:
3785 return new ICmpInst(Pred,
II->getArgOperand(0),
3786 ConstantInt::get(Ty,
C.byteSwap()));
3788 case Intrinsic::bitreverse:
3790 return new ICmpInst(Pred,
II->getArgOperand(0),
3791 ConstantInt::get(Ty,
C.reverseBits()));
3793 case Intrinsic::ctlz:
3794 case Intrinsic::cttz: {
3797 return new ICmpInst(Pred,
II->getArgOperand(0),
3803 unsigned Num =
C.getLimitedValue(
BitWidth);
3805 bool IsTrailing =
II->getIntrinsicID() == Intrinsic::cttz;
3808 APInt Mask2 = IsTrailing
3812 ConstantInt::get(Ty, Mask2));
3817 case Intrinsic::ctpop: {
3820 bool IsZero =
C.isZero();
3822 return new ICmpInst(Pred,
II->getArgOperand(0),
3829 case Intrinsic::fshl:
3830 case Intrinsic::fshr:
3831 if (
II->getArgOperand(0) ==
II->getArgOperand(1)) {
3832 const APInt *RotAmtC;
3836 return new ICmpInst(Pred,
II->getArgOperand(0),
3837 II->getIntrinsicID() == Intrinsic::fshl
3838 ? ConstantInt::get(Ty,
C.rotr(*RotAmtC))
3839 : ConstantInt::get(Ty,
C.rotl(*RotAmtC)));
3843 case Intrinsic::umax:
3844 case Intrinsic::uadd_sat: {
3847 if (
C.isZero() &&
II->hasOneUse()) {
3854 case Intrinsic::ssub_sat:
3857 return new ICmpInst(Pred,
II->getArgOperand(0),
II->getArgOperand(1));
3859 case Intrinsic::usub_sat: {
3864 return new ICmpInst(NewPred,
II->getArgOperand(0),
II->getArgOperand(1));
3879 assert(Cmp.isEquality());
3882 Value *Op0 = Cmp.getOperand(0);
3883 Value *Op1 = Cmp.getOperand(1);
3884 const auto *IIOp0 = dyn_cast<IntrinsicInst>(Op0);
3885 const auto *IIOp1 = dyn_cast<IntrinsicInst>(Op1);
3886 if (!IIOp0 || !IIOp1 || IIOp0->getIntrinsicID() != IIOp1->getIntrinsicID())
3889 switch (IIOp0->getIntrinsicID()) {
3890 case Intrinsic::bswap:
3891 case Intrinsic::bitreverse:
3894 return new ICmpInst(Pred, IIOp0->getOperand(0), IIOp1->getOperand(0));
3895 case Intrinsic::fshl:
3896 case Intrinsic::fshr: {
3899 if (IIOp0->getOperand(0) != IIOp0->getOperand(1))
3901 if (IIOp1->getOperand(0) != IIOp1->getOperand(1))
3903 if (IIOp0->getOperand(2) == IIOp1->getOperand(2))
3904 return new ICmpInst(Pred, IIOp0->getOperand(0), IIOp1->getOperand(0));
3910 unsigned OneUses = IIOp0->hasOneUse() + IIOp1->hasOneUse();
3915 Builder.
CreateSub(IIOp0->getOperand(2), IIOp1->getOperand(2));
3917 Op0->
getType(), IIOp0->getIntrinsicID(),
3918 {IIOp0->getOperand(0), IIOp0->getOperand(0), SubAmt});
3919 return new ICmpInst(Pred, IIOp1->getOperand(0), CombinedRotate);
3936 if (
auto *
II = dyn_cast<IntrinsicInst>(Cmp.getOperand(0))) {
3937 switch (
II->getIntrinsicID()) {
3940 case Intrinsic::fshl:
3941 case Intrinsic::fshr:
3942 if (Cmp.isEquality() &&
II->getArgOperand(0) ==
II->getArgOperand(1)) {
3944 if (
C.isZero() ||
C.isAllOnes())
3945 return new ICmpInst(Pred,
II->getArgOperand(0), Cmp.getOperand(1));
3959 case Instruction::Xor:
3963 case Instruction::And:
3967 case Instruction::Or:
3971 case Instruction::Mul:
3975 case Instruction::Shl:
3979 case Instruction::LShr:
3980 case Instruction::AShr:
3984 case Instruction::SRem:
3988 case Instruction::UDiv:
3992 case Instruction::SDiv:
3996 case Instruction::Sub:
4000 case Instruction::Add:
// Saturating-arithmetic compares: for one-use uadd.sat/usub.sat with a
// constant operand the admissible input range is computed as a ConstantRange
// and re-expressed as a single icmp via getEquivalentICmp (EquivPred,
// EquivInt, EquivOffset).  Compares of scmp/ucmp results against -1, 0, or 1
// reduce to one icmp of the original operands, and ctpop/ctlz/cttz compares
// become bound checks on the argument.
//
// Compares whose RHS is not an integer constant: phi operands, inttoptr, a
// load through a GEP of a global variable, and selects whose arms simplify
// individually (SimplifyOp / Simplifies / Transform).
// isMaskOrZero: recursive classification of values that are a low-bit mask or
// zero, looking through zext/sext, and/or/xor, select, shifts, add/sub, and
// min/max or bitreverse intrinsic calls.
//
// Low-bit-mask compare folds (the IsLowBitMask / Check lambdas) and the
// signed-truncation check: for a value masked by shifting left then right by
// MaskedBits, the compare against the original value is rewritten without the
// shifts (asserting that the shifts leave some bits untouched).
//
// Moving a shift from one hand of an 'and' into the other inside an icmp
// (XShift/YShift): the two shift amounts are summed into NewShAmt in the
// widest type, feasibility is checked against the bit width and the known
// leading zeros of the shifted operands, and the compare is rebuilt with a
// single shift.
//
// Division-based overflow checks are rewritten as llvm.umul.with.overflow /
// llvm.smul.with.overflow, negating the result when the original compare
// tested the opposite condition (NeedNegation) and rebuilding the multiply for
// other users (MulHadOtherUses).
// Compares where both operands share a value through and/or/xor
// (icmp (and/or/xor X, A) pred X/A): the predicate is adjusted (PredOut) or
// the compare is rewritten against zero.
//
// foldICmpBinOp: no-wrap facts are collected for both sides (NoOp0WrapProblem,
// Op0HasNUW/NSW, treating 'or' like an add where safe), common addends
// A/B/C/D are cancelled, adds that differ by a constant fold by adding the
// difference to one side (guarded by Diff.ule checks), sub patterns and
// negation are handled, same-operand nsw/nuw multiplies and srem by the same
// value compare their other operands, and per-opcode cases follow (add/sub/xor
// sign-mask tricks, mul with known trailing zeros, exact udiv/lshr/sdiv/ashr,
// and shl with matching nuw/nsw).
//
// Compares against a min/max intrinsic: the compare is evaluated against each
// of the intrinsic's operands (CmpXZ/CmpYZ via IsCondKnownTrue) and, when one
// side is decided, folded into a compare with the remaining operand
// (FoldIntoCmpYZ).
// Equality folds over both operands: xor'ed common operands cancel, masked
// forms compare the masked values (building a '.mask' value for shifted
// operands), the low half of a double-width add is checked via a shifted
// constant, chains of or'ed xors/subs are handled, funnel shifts are looked
// through, and the result ranges of cttz/ctlz (MaxRet) feed constant-range
// reasoning.
//
// Compares of cast operands: zext/zext or sext/sext pairs (including zext
// nneg) are rewritten on the source values, mixed pairs only when the
// extension kinds can be reconciled, and ptrtoint/inttoptr round trips are
// looked through when the pointer and integer sizes are compatible
// (CompatibleSizes).
//
// Helpers for OptimizeOverflowCheck: commutative operands are canonicalized
// (constant on the RHS), the replacement takes the original instruction's
// name, and nsw/nuw is set on the new instruction as appropriate.
// Forming llvm.umul.with.overflow from the zext-mul idiom: both multiplicands
// are zexts of narrower values, the multiply's other users must be truncs or
// masking 'and's, and the original compare against the width-limited maximum
// is replaced by the intrinsic's overflow bit.
//
// Select/compare/branch helpers: a compare feeding a conditional branch must
// dominate all other uses, the block must form a select-cmp-br chain
// (isChainSelectCmpBranch), and then the select's uses outside the block are
// replaced with one of its operands (replaceUsesOutsideBlock).
//
// Known-bits driven simplification: operands that are provably constant are
// replaced, predicates are tightened or decided using each operand's computed
// minimum and maximum (with off-by-one adjustments near the extremes, skipped
// for min/max-shaped compares), a power-of-two mask trick handles certain
// masked tests, and signed predicates become their unsigned forms when the
// sign bits are known (or the compare already carries samesign).
//
// Compares of a zext/sext of a narrow value against a constant are rewritten
// as range checks (CreateRangeCheck), and strict predicates with a constant
// RHS are canonicalized to their non-strict forms (FlippedStrictness).
// Boolean compares (canonicalizeICmpBool): compares of i1 values become xor,
// and/or with a negated operand, depending on the predicate; the inverted
// value is named I.getName() + ".not".
//
// Vector compare canonicalization: compares of reversed operands are rewritten
// as a reversed compare (llvm.vector.reverse), with splat constants handled
// via getSplatValue.
//
// A compare of the value produced by uadd.with.overflow against one of the
// addends folds to the intrinsic's overflow bit; pointer compares look through
// launder/strip.invariant.group; abs/nabs-style selects are compared via range
// checks around the minimum signed value (IsIntMinPosion, SMin); and checks on
// a recurrence's divisor or shift amount (CheckUGT1 / CheckNE0) guard folds of
// compares with strictly shrinking recurrences.
// The visitICmpInst driver: operands are swapped so the more complex side is
// on the left, negated select arms are matched, a select user of the compare
// is taken into account, symmetric operand pairs are folded for commutative
// predicates, pointer compares assert matching pointer types, compares of two
// freely-invertible operands are redone on the inverted values with a swapped
// predicate, add-overflow checks are formed via OptimizeOverflowCheck,
// zext/sext-of-i1 shift patterns are simplified, an extractvalue of a cmpxchg
// compared against its expected operand folds away, vector compares are
// delegated, and a final trunc + sign-bit mask rewrite
// (TruncC1.setBit(InputBitWidth - 1)) runs before returning whether anything
// changed.
// fcmp of an int-to-FP conversion against a constant: bail out when the FP
// type has no usable mantissa width, round the constant back to an integer
// (APSInt, checking IsExact), compare exponent bounds against the integer
// width, and otherwise decide each predicate from the sign and magnitude of
// the constant once NaN cases are excluded.
//
// fabs-with-zero and sign-based folds: a negative constant swaps the
// predicate, comparisons against +0.0 and against the smallest normalized
// value are special-cased, ninf/nnan flags are dropped when the operand no
// longer guarantees them, and the compare is rewritten by replacing the
// predicate and first operand (replacePredAndOp0).
//
// fcmp (C / X) vs 0.0 swaps the predicate when C is negative and compares X
// against zero directly, honouring the function's denormal mode; floor/ceil
// operands are looked through for the predicates where that is exact.
// The visitFCmpInst driver: operands are canonicalized (swapping so the more
// complex side comes first and asserting both have the same type), symmetric
// pairs are folded for commutative predicates, compares of two negated values
// swap the predicate, the LHS instruction is dispatched on its opcode (select,
// fsub, phi, sitofp/uitofp, fdiv, load through a GEP of a global variable), a
// negated constant RHS becomes a swapped compare against the negation, a
// constant that survives truncation to the operand's source format is narrowed
// (ConstantFP::get of TruncC), llvm.canonicalize calls are peeled off either
// or both operands, and vector compares are delegated before returning whether
// anything changed.
static Instruction * foldFCmpReciprocalAndZero(FCmpInst &I, Instruction *LHSI, Constant *RHSC)
Fold (C / X) < 0.0 --> X < 0.0 if possible. Swap predicate if necessary.
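As a rough illustration only (the helper name below is invented, not the actual implementation), the left-hand side of this pattern can be recognized with the PatternMatch utilities documented later in this listing; m_FDiv is the standard PatternMatch binary matcher for fdiv:

#include "llvm/IR/Instructions.h"
#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace PatternMatch;

// Sketch: does this fcmp compare (C / X) against zero? If so (and under the
// fast-math conditions the real fold checks), only the sign of X decides the
// result, so the compare can be rewritten as "fcmp pred' X, 0.0", swapping
// the predicate when C is negative.
static bool isReciprocalCmpWithZero(FCmpInst &I, Value *&X, const APFloat *&C) {
  return match(I.getOperand(0), m_FDiv(m_APFloat(C), m_Value(X))) &&
         match(I.getOperand(1), m_AnyZeroFP());
}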
static Instruction * foldFabsWithFcmpZero(FCmpInst &I, InstCombinerImpl &IC)
Optimize fabs(X) compared with zero.
static Value * rewriteGEPAsOffset(Value *Start, Value *Base, GEPNoWrapFlags NW, const DataLayout &DL, SetVector< Value * > &Explored, InstCombiner &IC)
Returns a re-written value of Start as an indexed GEP using Base as a pointer.
static bool addWithOverflow(APInt &Result, const APInt &In1, const APInt &In2, bool IsSigned=false)
Compute Result = In1+In2, returning true if the result overflowed for this type.
static Instruction * foldICmpAndXX(ICmpInst &I, const SimplifyQuery &Q, InstCombinerImpl &IC)
static Instruction * foldVectorCmp(CmpInst &Cmp, InstCombiner::BuilderTy &Builder)
static bool isMaskOrZero(const Value *V, bool Not, const SimplifyQuery &Q, unsigned Depth=0)
static Value * createLogicFromTable(const std::bitset< 4 > &Table, Value *Op0, Value *Op1, IRBuilderBase &Builder, bool HasOneUse)
static Instruction * foldICmpOfUAddOv(ICmpInst &I)
static bool isChainSelectCmpBranch(const SelectInst *SI)
Return true when the instruction sequence within a block is select-cmp-br.
static Instruction * foldICmpInvariantGroup(ICmpInst &I)
static Instruction * foldReductionIdiom(ICmpInst &I, InstCombiner::BuilderTy &Builder, const DataLayout &DL)
This function folds patterns produced by lowering of reduce idioms, such as llvm.vector....
static Instruction * canonicalizeICmpBool(ICmpInst &I, InstCombiner::BuilderTy &Builder)
Integer compare with boolean values can always be turned into bitwise ops.
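For i1 operands, equality is just a bitwise identity test. A minimal sketch of the kind of rewrite this enables, assuming an IRBuilder is available (the helper name is invented):

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// For i1 values: icmp eq A, B == not (xor A, B); icmp ult A, B == and (not A), B.
static Value *boolEqAsBitwise(IRBuilder<> &B, Value *A, Value *Bit) {
  return B.CreateNot(B.CreateXor(A, Bit));
}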
static Instruction * foldFCmpFSubIntoFCmp(FCmpInst &I, Instruction *LHSI, Constant *RHSC, InstCombinerImpl &CI)
static Value * foldICmpOrXorSubChain(ICmpInst &Cmp, BinaryOperator *Or, InstCombiner::BuilderTy &Builder)
Fold icmp eq/ne (or (xor/sub (X1, X2), xor/sub (X3, X4))), 0.
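The chain is zero only when every xor/sub pair compares equal, so the eq case is equivalent to the conjunction sketched below (illustrative only; the helper name is invented):

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// icmp eq (or (xor X1, X2), (xor X3, X4)), 0
//   --> and (icmp eq X1, X2), (icmp eq X3, X4)
static Value *expandOrXorChainEq(IRBuilder<> &B, Value *X1, Value *X2,
                                 Value *X3, Value *X4) {
  return B.CreateAnd(B.CreateICmpEQ(X1, X2), B.CreateICmpEQ(X3, X4));
}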
static bool hasBranchUse(ICmpInst &I)
Given an icmp instruction, return true if any use of this comparison is a branch on sign bit comparis...
static Value * foldICmpWithLowBitMaskedVal(CmpPredicate Pred, Value *Op0, Value *Op1, const SimplifyQuery &Q, InstCombiner &IC)
Some comparisons can be simplified.
static APInt getDemandedBitsLHSMask(ICmpInst &I, unsigned BitWidth)
When performing a comparison against a constant, it is possible that not all the bits in the LHS are ...
static Instruction * foldICmpShlLHSC(ICmpInst &Cmp, Instruction *Shl, const APInt &C)
Fold icmp (shl nuw C2, Y), C.
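A minimal sketch of recognizing the left-hand side with the m_NUWShl and m_APInt matchers documented below; the fold logic itself is omitted and the helper name is invented:

#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace PatternMatch;

// Match "shl nuw C2, Y". For instance, when C2 and C are both powers of two,
// the eq case reduces to comparing Y against log2(C) - log2(C2).
static bool matchShlNuwConstBase(Value *LHS, const APInt *&C2, Value *&Y) {
  return match(LHS, m_NUWShl(m_APInt(C2), m_Value(Y)));
}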
static Instruction * foldFCmpWithFloorAndCeil(FCmpInst &I, InstCombinerImpl &IC)
static Instruction * foldICmpXorXX(ICmpInst &I, const SimplifyQuery &Q, InstCombinerImpl &IC)
static Instruction * foldICmpOfCmpIntrinsicWithConstant(CmpPredicate Pred, IntrinsicInst *I, const APInt &C, InstCombiner::BuilderTy &Builder)
static Instruction * processUMulZExtIdiom(ICmpInst &I, Value *MulVal, const APInt *OtherVal, InstCombinerImpl &IC)
Recognize and process idiom involving test for multiplication overflow.
static Instruction * foldSqrtWithFcmpZero(FCmpInst &I, InstCombinerImpl &IC)
Optimize sqrt(X) compared with zero.
static Instruction * foldFCmpFNegCommonOp(FCmpInst &I)
static Instruction * foldICmpWithHighBitMask(ICmpInst &Cmp, InstCombiner::BuilderTy &Builder)
static ICmpInst * canonicalizeCmpWithConstant(ICmpInst &I)
If we have an icmp le or icmp ge instruction with a constant operand, turn it into the appropriate ic...
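For instance, icmp sle X, 5 can be rewritten as icmp slt X, 6, and icmp uge X, 8 as icmp ugt X, 7, provided adjusting the constant by one cannot wrap.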
static Instruction * foldICmpIntrinsicWithIntrinsic(ICmpInst &Cmp, InstCombiner::BuilderTy &Builder)
Fold an icmp with LLVM intrinsics.
static Instruction * foldICmpUSubSatOrUAddSatWithConstant(CmpPredicate Pred, SaturatingInst *II, const APInt &C, InstCombiner::BuilderTy &Builder)
static Instruction * foldICmpPow2Test(ICmpInst &I, InstCombiner::BuilderTy &Builder)
static bool subWithOverflow(APInt &Result, const APInt &In1, const APInt &In2, bool IsSigned=false)
Compute Result = In1-In2, returning true if the result overflowed for this type.
static bool canRewriteGEPAsOffset(Value *Start, Value *Base, GEPNoWrapFlags &NW, const DataLayout &DL, SetVector< Value * > &Explored)
Returns true if we can rewrite Start as a GEP with pointer Base and some integer offset.
static Instruction * foldICmpXNegX(ICmpInst &I, InstCombiner::BuilderTy &Builder)
static Instruction * processUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B, ConstantInt *CI2, ConstantInt *CI1, InstCombinerImpl &IC)
The caller has matched a pattern of the form: I = icmp ugt (add (add A, B), CI2), CI1 If this is of t...
static Value * foldShiftIntoShiftInAnotherHandOfAndInICmp(ICmpInst &I, const SimplifyQuery SQ, InstCombiner::BuilderTy &Builder)
static bool isSignTest(ICmpInst::Predicate &Pred, const APInt &C)
Returns true if the exploded icmp can be expressed as a signed comparison to zero and updates the pre...
static Instruction * transformToIndexedCompare(GEPOperator *GEPLHS, Value *RHS, CmpPredicate Cond, const DataLayout &DL, InstCombiner &IC)
Converts (CMP GEPLHS, RHS) if this change would make RHS a constant.
static Instruction * foldCtpopPow2Test(ICmpInst &I, IntrinsicInst *CtpopLhs, const APInt &CRhs, InstCombiner::BuilderTy &Builder, const SimplifyQuery &Q)
static void setInsertionPoint(IRBuilder<> &Builder, Value *V, bool Before=true)
static bool isNeutralValue(Instruction::BinaryOps BinaryOp, Value *RHS, bool IsSigned)
static Value * foldICmpWithTruncSignExtendedVal(ICmpInst &I, InstCombiner::BuilderTy &Builder)
Some comparisons can be simplified.
static Instruction * foldICmpOrXX(ICmpInst &I, const SimplifyQuery &Q, InstCombinerImpl &IC)
This file provides internal interfaces used to implement the InstCombine.
This file provides the interface for the instcombine pass implementation.
This file implements a set that has insertion order iteration characteristics.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
opStatus convert(const fltSemantics &ToSemantics, roundingMode RM, bool *losesInfo)
static APFloat getSmallestNormalized(const fltSemantics &Sem, bool Negative=false)
Returns the smallest (by magnitude) normalized finite number in the given semantics.
APInt bitcastToAPInt() const
static APFloat getLargest(const fltSemantics &Sem, bool Negative=false)
Returns the largest finite number in the given semantics.
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
FPClassTest classify() const
Return the FPClassTest which will return true for the value.
opStatus roundToIntegral(roundingMode RM)
Class for arbitrary precision integers.
APInt udiv(const APInt &RHS) const
Unsigned division operation.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
static void udivrem(const APInt &LHS, const APInt &RHS, APInt &Quotient, APInt &Remainder)
Dual division/remainder interface.
bool isNegatedPowerOf2() const
Check if this APInt's negated value is a power of two greater than zero.
APInt zext(unsigned width) const
Zero extend to a new width.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
uint64_t getZExtValue() const
Get zero extended value.
unsigned getActiveBits() const
Compute the number of active bits in the value.
APInt trunc(unsigned width) const
Truncate to new width.
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
APInt abs() const
Get the absolute value.
unsigned ceilLogBase2() const
bool sgt(const APInt &RHS) const
Signed greater than comparison.
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
APInt usub_ov(const APInt &RHS, bool &Overflow) const
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
bool isSignMask() const
Check if the APInt's value is returned by getSignMask.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
static APInt getMinValue(unsigned numBits)
Gets minimum unsigned value of APInt for a specific bit width.
bool isNegative() const
Determine sign of this APInt.
APInt sadd_ov(const APInt &RHS, bool &Overflow) const
bool intersects(const APInt &RHS) const
This operation tests if there are any pairs of corresponding bits between this APInt and RHS that are...
bool eq(const APInt &RHS) const
Equality comparison.
APInt sdiv(const APInt &RHS) const
Signed division function for APInt.
APInt uadd_ov(const APInt &RHS, bool &Overflow) const
void negate()
Negate this APInt in place.
unsigned countr_zero() const
Count the number of trailing zero bits.
unsigned countl_zero() const
The APInt version of std::countl_zero.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
void flipAllBits()
Toggle every bit to its opposite value.
unsigned countl_one() const
Count the number of leading one bits.
unsigned logBase2() const
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
bool isMask(unsigned numBits) const
bool isMaxSignedValue() const
Determine if this is the largest signed value.
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
APInt shl(unsigned shiftAmt) const
Left-shift function.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
bool sge(const APInt &RHS) const
Signed greater or equal comparison.
APInt ssub_ov(const APInt &RHS, bool &Overflow) const
bool isOne() const
Determine if this is a value of 1.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
unsigned countr_one() const
Count the number of trailing one bits.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
An arbitrary precision integer that knows its signedness.
an instruction to allocate memory on the stack
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Class to represent array types.
LLVM Basic Block Representation.
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
BinaryOps getOpcode() const
static BinaryOperator * CreateNot(Value *Op, const Twine &Name="", InsertPosition InsertBefore=nullptr)
static BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
Conditional or Unconditional Branch instruction.
Value * getArgOperand(unsigned i) const
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
This class is the base class for the comparison instructions.
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Predicate getStrictPredicate() const
For example, SGE -> SGT, SLE -> SLT, ULE -> ULT, UGE -> UGT.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ ICMP_ULT
unsigned less than
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
@ ICMP_SGE
signed greater or equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
bool isTrueWhenEqual() const
This is just a convenience.
static CmpInst * Create(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate and the two operands.
Predicate getNonStrictPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Predicate getPredicate() const
Return the predicate for this instruction.
bool isStrictPredicate() const
Predicate getFlippedStrictnessPredicate() const
For predicate of kind "is X or equal to 0" returns the predicate "is X".
bool isIntPredicate() const
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getPointerBitCastOrAddrSpaceCast(Constant *C, Type *Ty)
Create a BitCast or AddrSpaceCast for a pointer type depending on the address space.
static Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static Constant * getNot(Constant *C)
static Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getXor(Constant *C1, Constant *C2)
static Constant * getNeg(Constant *C, bool HasNSW=false)
static Constant * getZero(Type *Ty, bool Negative=false)
This is the shared class of boolean and integer constants.
uint64_t getLimitedValue(uint64_t Limit=~0ULL) const
getLimitedValue - If the value is smaller than the specified limit, return it, otherwise return the l...
static ConstantInt * getTrue(LLVMContext &Context)
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
static ConstantInt * getSigned(IntegerType *Ty, int64_t V)
Return a ConstantInt with the specified value for the specified type.
static ConstantInt * getFalse(LLVMContext &Context)
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
const APInt & getValue() const
Return the constant as an APInt value reference.
static ConstantInt * getBool(LLVMContext &Context, bool V)
This class represents a range of values.
ConstantRange add(const ConstantRange &Other) const
Return a new range representing the possible values resulting from an addition of a value in this ran...
std::optional< ConstantRange > exactUnionWith(const ConstantRange &CR) const
Union the two ranges and return the result if it can be represented exactly, otherwise return std::nu...
bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
ConstantRange subtract(const APInt &CI) const
Subtract the specified constant from the endpoints of this constant range.
const APInt * getSingleElement() const
If this set contains a single element, return it, otherwise return null.
ConstantRange truncate(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly smaller than the current typ...
ConstantRange difference(const ConstantRange &CR) const
Subtract the specified range from this range (aka relative complement of the sets).
bool isEmptySet() const
Return true if this set contains no members.
static ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
ConstantRange inverse() const
Return a new range that is the logical not of the current set.
std::optional< ConstantRange > exactIntersectWith(const ConstantRange &CR) const
Intersect the two ranges and return the result if it can be represented exactly, otherwise return std...
ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
ConstantRange sub(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a subtraction of a value in this r...
static ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)
Produce the range that contains X if and only if "X BinOp Other" does not wrap.
static Constant * getSplat(ElementCount EC, Constant *Elt)
Return a ConstantVector with the specified constant in each element.
This is an important base class in LLVM.
static Constant * getIntegerValue(Type *Ty, const APInt &V)
Return the value for an integer or pointer constant, or a vector thereof, with the given scalar value...
static Constant * getAllOnesValue(Type *Ty)
const APInt & getUniqueInteger() const
If C is a constant integer then return its value, otherwise C must be a vector of constant integers,...
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
This class represents an Operation in the Expression.
A parsed version of the target data layout string, plus methods for querying it.
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
unsigned getPointerTypeSizeInBits(Type *) const
Layout pointer size, in bits, based on the type.
IntegerType * getIndexType(LLVMContext &C, unsigned AddressSpace) const
Returns the type of a GEP index in AddressSpace.
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
Type * getSmallestLegalIntType(LLVMContext &C, unsigned Width=0) const
Returns the smallest integer type with size at least as big as Width bits.
iterator find(const_arg_type_t< KeyT > Val)
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
ArrayRef< BranchInst * > conditionsFor(const Value *V) const
Access the list of branches which affect this value.
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
This instruction compares its operands according to the predicate given to the constructor.
Represents flags for the getelementptr instruction/expression.
bool hasNoUnsignedSignedWrap() const
bool hasNoUnsignedWrap() const
GEPNoWrapFlags intersectForOffsetAdd(GEPNoWrapFlags Other) const
Given (gep (gep p, x), y), determine the nowrap flags for (gep p, x+y).
static GEPNoWrapFlags none()
bool isInBounds() const
Test whether this is an inbounds GEP, as defined by LangRef.html.
Type * getSourceElementType() const
Value * getPointerOperand()
GEPNoWrapFlags getNoWrapFlags() const
bool hasAllConstantIndices() const
Return true if all of the indices of this GEP are constant integers.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Type * getValueType() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
This instruction compares its operands according to the predicate given to the constructor.
static bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
Predicate getFlippedSignednessPredicate() const
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->EQ.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
bool isEquality() const
Return true if this predicate is either EQ or NE.
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
Common base class shared among various IRBuilders.
Value * CreateICmpULT(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateExtractElement(Value *Vec, Value *Idx, const Twine &Name="")
IntegerType * getIntNTy(unsigned N)
Fetch the type representing an N-bit integer.
Value * CreateICmpSGT(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name="")
Return a vector value that contains V broadcast to NumElts elements.
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
ConstantInt * getTrue()
Get the constant value for i1 true.
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Value * CreateCast(Instruction::CastOps Op, Value *V, Type *DestTy, const Twine &Name="", MDNode *FPMathTag=nullptr, FMFSource FMFSource={})
Value * CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", GEPNoWrapFlags NW=GEPNoWrapFlags::none())
Value * CreateNeg(Value *V, const Twine &Name="", bool HasNSW=false)
Value * createIsFPClass(Value *FPNum, unsigned Test)
CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Value * CreateCmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Value * CreateNot(Value *V, const Twine &Name="")
Value * CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateICmpUGT(Value *LHS, Value *RHS, const Twine &Name="")
CallInst * CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with 1 operand which is mangled on its type.
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
ConstantInt * getFalse()
Get the constant value for i1 false.
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateICmpSLT(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
Value * CreateIsNull(Value *Arg, const Twine &Name="")
Return a boolean value testing if Arg == 0.
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Value * CreateXor(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
ConstantInt * getInt(const APInt &AI)
Get a constant integer value.
Value * CreateURem(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Instruction * foldICmpShrConstant(ICmpInst &Cmp, BinaryOperator *Shr, const APInt &C)
Fold icmp ({al}shr X, Y), C.
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false)
Given an instruction with a select as one operand and a constant as the other operand,...
Instruction * foldICmpWithZextOrSext(ICmpInst &ICmp)
Instruction * foldICmpSelectConstant(ICmpInst &Cmp, SelectInst *Select, ConstantInt *C)
Instruction * foldICmpSRemConstant(ICmpInst &Cmp, BinaryOperator *UDiv, const APInt &C)
Instruction * foldICmpBinOpWithConstant(ICmpInst &Cmp, BinaryOperator *BO, const APInt &C)
Fold an icmp with BinaryOp and constant operand: icmp Pred BO, C.
Instruction * foldICmpOrConstant(ICmpInst &Cmp, BinaryOperator *Or, const APInt &C)
Fold icmp (or X, Y), C.
Instruction * foldICmpTruncWithTruncOrExt(ICmpInst &Cmp, const SimplifyQuery &Q)
Fold icmp (trunc nuw/nsw X), (trunc nuw/nsw Y).
Instruction * foldSignBitTest(ICmpInst &I)
Fold equality-comparison between zero and any (maybe truncated) right-shift by one-less-than-bitwidth...
Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN, bool AllowMultipleUses=false)
Given a binary operator, cast instruction, or select which has a PHI node as operand #0,...
Value * insertRangeTest(Value *V, const APInt &Lo, const APInt &Hi, bool isSigned, bool Inside)
Emit a computation of: (V >= Lo && V < Hi) if Inside is true, otherwise (V < Lo || V >= Hi).
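The usual strength-reduced form of such a range test subtracts the lower bound and performs a single unsigned compare. A minimal sketch of the Inside case, with an invented helper name:

#include "llvm/ADT/APInt.h"
#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// V in [Lo, Hi)  <=>  (V - Lo) u< (Hi - Lo), relying on unsigned wrap-around.
static Value *inRange(IRBuilder<> &B, Value *V, const APInt &Lo, const APInt &Hi) {
  Value *Off = B.CreateSub(V, ConstantInt::get(V->getType(), Lo));
  return B.CreateICmpULT(Off, ConstantInt::get(V->getType(), Hi - Lo));
}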
Instruction * foldICmpBinOp(ICmpInst &Cmp, const SimplifyQuery &SQ)
Try to fold icmp (binop), X or icmp X, (binop).
Instruction * foldICmpSubConstant(ICmpInst &Cmp, BinaryOperator *Sub, const APInt &C)
Fold icmp (sub X, Y), C.
Instruction * foldICmpInstWithConstantNotInt(ICmpInst &Cmp)
Handle icmp with constant (but not simple integer constant) RHS.
Instruction * foldICmpShlConstConst(ICmpInst &I, Value *ShAmt, const APInt &C1, const APInt &C2)
Handle "(icmp eq/ne (shl AP2, A), AP1)" -> (icmp eq/ne A, TrailingZeros(AP1) - TrailingZeros(AP2)).
Value * reassociateShiftAmtsOfTwoSameDirectionShifts(BinaryOperator *Sh0, const SimplifyQuery &SQ, bool AnalyzeForSignBitExtraction=false)
Instruction * foldICmpEqIntrinsicWithConstant(ICmpInst &ICI, IntrinsicInst *II, const APInt &C)
Fold an equality icmp with LLVM intrinsic and constant operand.
Value * foldMultiplicationOverflowCheck(ICmpInst &Cmp)
Fold (-1 u/ x) u< y ((x * y) ?/ x) != y to @llvm.
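The "(-1 u/ x) u< y" idiom is the standard front-end check for whether x * y overflows an unsigned multiply. A sketch of the intrinsic-based replacement, assuming the fold targets llvm.umul.with.overflow as in upstream LLVM (the helper name is invented):

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
using namespace llvm;

// Build the overflow bit of llvm.umul.with.overflow(x, y), which later
// lowers to a flags test instead of a division.
static Value *mulOverflowBit(IRBuilder<> &B, Value *X, Value *Y) {
  CallInst *Mul = B.CreateIntrinsic(Intrinsic::umul_with_overflow,
                                    {X->getType()}, {X, Y});
  return B.CreateExtractValue(Mul, 1);
}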
Instruction * foldICmpWithConstant(ICmpInst &Cmp)
Fold icmp Pred X, C.
CmpInst * canonicalizeICmpPredicate(CmpInst &I)
If we have a comparison with a non-canonical predicate, if we can update all the users,...
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Instruction * foldICmpWithZero(ICmpInst &Cmp)
Instruction * foldICmpCommutative(CmpPredicate Pred, Value *Op0, Value *Op1, ICmpInst &CxtI)
Instruction * foldICmpBinOpEqualityWithConstant(ICmpInst &Cmp, BinaryOperator *BO, const APInt &C)
Fold an icmp equality instruction with binary operator LHS and constant RHS: icmp eq/ne BO,...
Instruction * foldICmpUsingBoolRange(ICmpInst &I)
If one operand of an icmp is effectively a bool (value range of {0,1}), then try to reduce patterns b...
Instruction * foldICmpWithTrunc(ICmpInst &Cmp)
Instruction * foldICmpIntrinsicWithConstant(ICmpInst &ICI, IntrinsicInst *II, const APInt &C)
Fold an icmp with LLVM intrinsic and constant operand: icmp Pred II, C.
bool SimplifyDemandedBits(Instruction *I, unsigned Op, const APInt &DemandedMask, KnownBits &Known, unsigned Depth, const SimplifyQuery &Q) override
This form of SimplifyDemandedBits simplifies the specified instruction operand if possible,...
bool matchThreeWayIntCompare(SelectInst *SI, Value *&LHS, Value *&RHS, ConstantInt *&Less, ConstantInt *&Equal, ConstantInt *&Greater)
Match a select chain which produces one of three values based on whether the LHS is less than,...
Instruction * foldCmpLoadFromIndexedGlobal(LoadInst *LI, GetElementPtrInst *GEP, GlobalVariable *GV, CmpInst &ICI, ConstantInt *AndCst=nullptr)
This is called when we see this pattern: cmp pred (load (gep GV, ...)), cmpcst where GV is a global v...
Instruction * visitFCmpInst(FCmpInst &I)
Instruction * foldICmpUsingKnownBits(ICmpInst &Cmp)
Try to fold the comparison based on range information we can get by checking whether bits are known t...
Instruction * foldICmpDivConstant(ICmpInst &Cmp, BinaryOperator *Div, const APInt &C)
Fold icmp ({su}div X, Y), C.
Instruction * foldIRemByPowerOfTwoToBitTest(ICmpInst &I)
If we have: icmp eq/ne (urem/srem x, y), 0 iff y is a power-of-two, we can replace this with a bit te...
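When y is a known power of two, the remainder test reduces to masking the low bits. A minimal sketch with an invented helper name:

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// icmp eq (urem x, y), 0  -->  icmp eq (and x, y - 1), 0   (y a power of two)
static Value *remByPow2IsZero(IRBuilder<> &B, Value *X, Value *Y) {
  Value *Mask = B.CreateSub(Y, ConstantInt::get(Y->getType(), 1));
  return B.CreateIsNull(B.CreateAnd(X, Mask));
}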
Instruction * foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI, Constant *RHSC)
Fold fcmp ([us]itofp x, cst) if possible.
Instruction * foldICmpUDivConstant(ICmpInst &Cmp, BinaryOperator *UDiv, const APInt &C)
Fold icmp (udiv X, Y), C.
Instruction * foldICmpAddOpConst(Value *X, const APInt &C, CmpPredicate Pred)
Fold "icmp pred (X+C), X".
Constant * getLosslessTrunc(Constant *C, Type *TruncTy, unsigned ExtOp)
Instruction * foldICmpWithCastOp(ICmpInst &ICmp)
Handle icmp (cast x), (cast or constant).
Instruction * foldICmpTruncConstant(ICmpInst &Cmp, TruncInst *Trunc, const APInt &C)
Fold icmp (trunc X), C.
Instruction * foldICmpAddConstant(ICmpInst &Cmp, BinaryOperator *Add, const APInt &C)
Fold icmp (add X, Y), C.
Instruction * foldICmpMulConstant(ICmpInst &Cmp, BinaryOperator *Mul, const APInt &C)
Fold icmp (mul X, Y), C.
Instruction * tryFoldInstWithCtpopWithNot(Instruction *I)
Instruction * foldICmpXorConstant(ICmpInst &Cmp, BinaryOperator *Xor, const APInt &C)
Fold icmp (xor X, Y), C.
Instruction * foldSelectICmp(CmpPredicate Pred, SelectInst *SI, Value *RHS, const ICmpInst &I)
Instruction * foldICmpInstWithConstantAllowPoison(ICmpInst &Cmp, const APInt &C)
Try to fold integer comparisons with a constant operand: icmp Pred X, C where X is some kind of instr...
Instruction * foldICmpAndShift(ICmpInst &Cmp, BinaryOperator *And, const APInt &C1, const APInt &C2)
Fold icmp (and (sh X, Y), C2), C1.
Instruction * foldICmpInstWithConstant(ICmpInst &Cmp)
Try to fold integer comparisons with a constant operand: icmp Pred X, C where X is some kind of instr...
Instruction * foldICmpXorShiftConst(ICmpInst &Cmp, BinaryOperator *Xor, const APInt &C)
For power-of-2 C: ((X s>> ShiftC) ^ X) u< C --> (X + C) u< (C << 1) ((X s>> ShiftC) ^ X) u> (C - 1) -...
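As a worked check: for 32-bit X with ShiftC = 31 and C = 8, (X s>> 31) ^ X is X for non-negative X and ~X for negative X, so the u< 8 form accepts exactly X in [-8, 8); (X + 8) u< 16 accepts the same range.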
Instruction * foldICmpShlConstant(ICmpInst &Cmp, BinaryOperator *Shl, const APInt &C)
Fold icmp (shl X, Y), C.
Instruction * foldICmpAndConstant(ICmpInst &Cmp, BinaryOperator *And, const APInt &C)
Fold icmp (and X, Y), C.
Instruction * foldICmpEquality(ICmpInst &Cmp)
Instruction * foldICmpWithMinMax(Instruction &I, MinMaxIntrinsic *MinMax, Value *Z, CmpPredicate Pred)
Fold icmp Pred min|max(X, Y), Z.
bool dominatesAllUses(const Instruction *DI, const Instruction *UI, const BasicBlock *DB) const
True when DB dominates all uses of DI except UI.
bool foldAllocaCmp(AllocaInst *Alloca)
Instruction * visitICmpInst(ICmpInst &I)
OverflowResult computeOverflow(Instruction::BinaryOps BinaryOp, bool IsSigned, Value *LHS, Value *RHS, Instruction *CxtI) const
Instruction * foldICmpWithDominatingICmp(ICmpInst &Cmp)
Canonicalize icmp instructions based on dominating conditions.
bool replacedSelectWithOperand(SelectInst *SI, const ICmpInst *Icmp, const unsigned SIOpd)
Try to replace select with select operand SIOpd in SI-ICmp sequence.
Instruction * foldICmpShrConstConst(ICmpInst &I, Value *ShAmt, const APInt &C1, const APInt &C2)
Handle "(icmp eq/ne (ashr/lshr AP2, A), AP1)" -> (icmp eq/ne A, Log2(AP2/AP1)) -> (icmp eq/ne A,...
void freelyInvertAllUsersOf(Value *V, Value *IgnoredUser=nullptr)
Freely adapt every user of V as-if V was changed to !V.
Instruction * foldICmpAndConstConst(ICmpInst &Cmp, BinaryOperator *And, const APInt &C1)
Fold icmp (and X, C2), C1.
Instruction * foldICmpBitCast(ICmpInst &Cmp)
Instruction * foldGEPICmp(GEPOperator *GEPLHS, Value *RHS, CmpPredicate Cond, Instruction &I)
Fold comparisons between a GEP instruction and something else.
The core instruction combiner logic.
OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS, const Instruction *CxtI) const
bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)
Return true if the specified value is free to invert (apply ~ to).
OverflowResult computeOverflowForUnsignedMul(const Value *LHS, const Value *RHS, const Instruction *CxtI, bool IsNSW=false) const
static unsigned getComplexity(Value *V)
Assign a complexity or rank value to LLVM Values.
bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero=false, unsigned Depth=0, const Instruction *CxtI=nullptr)
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
uint64_t MaxArraySizeForCombine
Maximum size of array considered when transforming.
OverflowResult computeOverflowForSignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const Instruction *CxtI) const
static Constant * SubOne(Constant *C)
Subtract one from a Constant.
OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS, const Instruction *CxtI) const
static bool isCanonicalPredicate(CmpPredicate Pred)
Predicate canonicalization reduces the number of patterns that need to be matched by other transforms...
bool canFreelyInvertAllUsersOf(Instruction *V, Value *IgnoredUser)
Given i1 V, can every user of V be freely adapted if V is changed to !V ? InstCombine's freelyInvertA...
void addToWorklist(Instruction *I)
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS, const Instruction *CxtI) const
void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth, const Instruction *CxtI) const
OverflowResult computeOverflowForUnsignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const Instruction *CxtI) const
Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)
const SimplifyQuery & getSimplifyQuery() const
unsigned ComputeMaxSignificantBits(const Value *Op, unsigned Depth=0, const Instruction *CxtI=nullptr) const
bool hasNoNaNs() const LLVM_READONLY
Determine whether the no-NaNs flag is set.
bool hasNoUnsignedWrap() const LLVM_READONLY
Determine whether the no unsigned wrap flag is set.
bool hasNoInfs() const LLVM_READONLY
Determine whether the no-infs flag is set.
bool isArithmeticShift() const
Return true if this is an arithmetic shift right.
bool hasNoSignedWrap() const LLVM_READONLY
Determine whether the no signed wrap flag is set.
bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
bool isExact() const LLVM_READONLY
Determine whether the exact flag is set.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
A wrapper class for inspecting calls to intrinsic functions.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
An instruction for reading from memory.
bool isVolatile() const
Return true if this is a load from a volatile memory location.
This class represents min/max intrinsics.
A Module instance is used to store all the information related to an LLVM module.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
Represents a saturating add/sub intrinsic.
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, Instruction *MDFrom=nullptr)
A vector that has set insertion semantics.
size_type size() const
Determine the number of elements in the SetVector.
bool insert(const value_type &X)
Insert a new element into the SetVector.
bool contains(const key_type &key) const
Check if the SetVector contains the given key.
This instruction constructs a fixed permutation of two input vectors.
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
reverse_iterator rbegin()
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Class to represent struct types.
This class represents a truncation of integer types.
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
The instances of the Type class are immutable: once they are created, they are never changed.
unsigned getIntegerBitWidth() const
const fltSemantics & getFltSemantics() const
bool isVectorTy() const
True if this is an instance of VectorType.
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
bool isPointerTy() const
True if this is an instance of PointerType.
static IntegerType * getInt1Ty(LLVMContext &C)
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isPPC_FP128Ty() const
Return true if this is powerpc long double.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Type * getWithNewBitWidth(unsigned NewBitWidth) const
Given an integer or vector type, change the lane bitwidth to NewBitwidth, whilst keeping the old numb...
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
int getFPMantissaWidth() const
Return the width of the mantissa of this type.
bool isIntegerTy() const
True if this is an instance of IntegerType.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
A Use represents the edge between a Value definition and its users.
void setOperand(unsigned i, Value *Val)
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
const Value * stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset, bool AllowNonInbounds, bool AllowInvariantGroup=false, function_ref< bool(Value &Value, APInt &Offset)> ExternalAnalysis=nullptr) const
Accumulate the constant offset this value has compared to a base pointer.
bool hasOneUse() const
Return true if there is exactly one use of this value.
iterator_range< user_iterator > users()
bool hasNUsesOrMore(unsigned N) const
Return true if this value has N uses or more.
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
LLVMContext & getContext() const
All values hold a context through their type.
iterator_range< use_iterator > uses()
StringRef getName() const
Return a constant reference to the value's name.
void takeName(Value *V)
Transfer the name from V to this value.
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
constexpr ScalarTy getFixedValue() const
const ParentTy * getParent() const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
APInt RoundingUDiv(const APInt &A, const APInt &B, APInt::Rounding RM)
Return A unsign-divided by B, rounded by the given rounding mode.
APInt RoundingSDiv(const APInt &A, const APInt &B, APInt::Rounding RM)
Return A sign-divided by B, rounded by the given rounding mode.
Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
cst_pred_ty< is_lowbit_mask > m_LowBitMask()
Match an integer or vector with only the low bit(s) set.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
cst_pred_ty< is_sign_mask > m_SignMask()
Match an integer or vector with only the sign bit(s) set.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWAdd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
BinaryOp_match< LHS, RHS, Instruction::URem > m_URem(const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, TruncInst >, OpTy > m_TruncOrSelf(const OpTy &Op)
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
match_combine_or< CastInst_match< OpTy, ZExtInst >, OpTy > m_ZExtOrSelf(const OpTy &Op)
bool match(Val *V, const Pattern &P)
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
cstfp_pred_ty< is_any_zero_fp > m_AnyZeroFP()
Match a floating-point negative zero or positive zero.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches logical shift operations.
specific_intval< true > m_SpecificIntAllowPoison(const APInt &V)
CmpClass_match< LHS, RHS, ICmpInst, true > m_c_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
Matches an ICmp with a predicate over LHS and RHS in either order.
OverflowingBinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWNeg(const ValTy &V)
Matches a 'Neg' as 'sub nsw 0, V'.
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
BinOpPred_match< LHS, RHS, is_logical_shift_op > m_LogicalShift(const LHS &L, const RHS &R)
Matches logical shift operations.
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > m_SMin(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_Sqrt(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::Xor, true > m_c_Xor(const LHS &L, const RHS &R)
Matches an Xor with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::FAdd > m_FAdd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
cst_pred_ty< is_zero_int > m_ZeroInt()
Match an integer 0 or a vector with all elements equal to 0.
apint_match m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
NoWrapTrunc_match< OpTy, TruncInst::NoSignedWrap > m_NSWTrunc(const OpTy &Op)
Matches trunc nsw.
OneUse_match< T > m_OneUse(const T &SubPattern)
BinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub > m_Neg(const ValTy &V)
Matches a 'Neg' as 'sub 0, V'.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
match_combine_and< class_match< Constant >, match_unless< constantexpr_match > > m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
ThreeOps_match< decltype(m_Value()), LHS, RHS, Instruction::Select, true > m_c_Select(const LHS &L, const RHS &R)
Match Select(C, LHS, RHS) or Select(C, RHS, LHS)
CastInst_match< OpTy, FPExtInst > m_FPExt(const OpTy &Op)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWShl(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Mul, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWMul(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
cst_pred_ty< is_negated_power2_or_zero > m_NegatedPower2OrZero()
Match an integer or vector negated power-of-2.
NoWrapTrunc_match< OpTy, TruncInst::NoUnsignedWrap > m_NUWTrunc(const OpTy &Op)
Matches trunc nuw.
cst_pred_ty< custom_checkfn< APInt > > m_CheckedInt(function_ref< bool(const APInt &)> CheckFn)
Match an integer or vector where CheckFn(ele) for each element is true.
cst_pred_ty< is_lowbit_mask_or_zero > m_LowBitMaskOrZero()
Match an integer or vector with only the low bit(s) set.
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches a Add with LHS and RHS in either order.
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
CastInst_match< OpTy, UIToFPInst > m_UIToFP(const OpTy &Op)
CastOperator_match< OpTy, Instruction::BitCast > m_BitCast(const OpTy &Op)
Matches BitCast.
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Signum_match< Val_t > m_Signum(const Val_t &V)
Matches a signum pattern.
CastInst_match< OpTy, SIToFPInst > m_SIToFP(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
cstfp_pred_ty< is_pos_zero_fp > m_PosZeroFP()
Match a floating-point positive zero.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
UAddWithOverflow_match< LHS_t, RHS_t, Sum_t > m_UAddWithOverflow(const LHS_t &L, const RHS_t &R, const Sum_t &S)
Match an icmp instruction checking for unsigned overflow on addition.
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
BinOpPred_match< LHS, RHS, is_irem_op > m_IRem(const LHS &L, const RHS &R)
Matches integer remainder operations.
apfloat_match m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > > > m_MaxOrMin(const LHS &L, const RHS &R)
CastInst_match< OpTy, FPTruncInst > m_FPTrunc(const OpTy &Op)
auto m_Undef()
Match an arbitrary undef constant.
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
ElementWiseBitCast_match< OpTy > m_ElementWiseBitCast(const OpTy &Op)
m_Intrinsic_Ty< Opnd0 >::Ty m_FAbs(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::Mul, true > m_c_Mul(const LHS &L, const RHS &R)
Matches a Mul with LHS and RHS in either order.
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
match_unless< Ty > m_Unless(const Ty &M)
Match if the inner matcher does NOT match.
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
cst_pred_ty< icmp_pred_with_threshold > m_SpecificInt_ICMP(ICmpInst::Predicate Predicate, const APInt &Threshold)
Match an integer or vector with every element comparing 'pred' (eg/ne/...) to Threshold.
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
@ NeverOverflows
Never overflows.
@ AlwaysOverflowsHigh
Always overflows in the direction of signed/unsigned max value.
@ AlwaysOverflowsLow
Always overflows in the direction of signed/unsigned min value.
@ MayOverflow
May or may not overflow.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS, bool &TrueIfSigned)
Given an exploded icmp instruction, return true if the comparison only checks the sign bit.
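For example, icmp slt X, 0 and icmp ugt X, SIGNED_MAX are both sign-bit checks with TrueIfSigned = true, while icmp sgt X, -1 checks that the sign bit is clear.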
bool isKnownNeverInfinity(const Value *V, unsigned Depth, const SimplifyQuery &SQ)
Return true if the floating-point scalar value is not an infinity or if the floating-point vector val...
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given value is known to have exactly one bit set when defined.
Value * simplifyFCmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q)
Given operands for an FCmpInst, fold the result or return null.
ConstantRange computeConstantRange(const Value *V, bool ForSigned, bool UseInstrInfo=true, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Determine the possible constant range of an integer or vector of integer value.
Constant * ConstantFoldExtractValueInstruction(Constant *Agg, ArrayRef< unsigned > Idxs)
Attempt to constant fold an extractvalue instruction with the specified operands and indices.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned el...
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
int countl_zero(T Val)
Count number of 0's from the most significant bit to the least stopping at the first 1.
Value * emitGEPOffset(IRBuilderBase *Builder, const DataLayout &DL, User *GEP, bool NoAssumptions=false)
Given a getelementptr instruction/constantexpr, emit the code necessary to compute the offset from th...
constexpr unsigned MaxAnalysisRecursionDepth
Constant * ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op, const DataLayout &DL)
Attempt to constant fold a unary operation with the specified operand.
bool isKnownNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be negative (i.e. non-positive and non-zero).
SelectPatternFlavor
Specific patterns of select instructions we can match.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
bool PointerMayBeCaptured(const Value *V, bool ReturnCaptures, bool StoreCaptures, unsigned MaxUsesToExplore=0)
PointerMayBeCaptured - Return true if this pointer value may be captured by the enclosing function (w...
SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out param...
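A sketch of recognizing a signed-max idiom with this matcher; the helper name is invented:

#include "llvm/Analysis/ValueTracking.h"

// True if V is `select (icmp sgt a, b), a, b` or an equivalent signed-max form.
bool isSignedMaxIdiom(llvm::Value *V) {
  llvm::Value *LHS, *RHS;
  return llvm::matchSelectPattern(V, LHS, RHS).Flavor == llvm::SPF_SMAX;
}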
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Value * simplifyICmpInst(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an ICmpInst, fold the result or return null.
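For example, a sketch that hands an always-false compare to the simplifier; the SimplifyQuery is assumed to be set up by the caller, and X is assumed to be an integer value:

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/InstrTypes.h"

// `icmp ult X, 0` is always false; the simplifier returns the i1 false
// constant here, and nullptr whenever no fold applies.
llvm::Value *simplifyAlwaysFalse(llvm::Value *X, const llvm::SimplifyQuery &Q) {
  auto *Zero = llvm::ConstantInt::get(X->getType(), 0);
  return llvm::simplifyICmpInst(llvm::CmpInst::ICMP_ULT, X, Zero, Q);
}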
Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
bool isKnownNonEqual(const Value *V1, const Value *V2, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given values are known to be non-equal when defined.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Or
Bitwise or logical OR of integers.
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
@ SMax
Signed integer max implemented in terms of select(cmp()).
@ And
Bitwise or logical AND of integers.
@ SMin
Signed integer min implemented in terms of select(cmp()).
@ UMax
Unsigned integer max implemented in terms of select(cmp()).
std::optional< DecomposedBitTest > decomposeBitTestICmp(Value *LHS, Value *RHS, CmpInst::Predicate Pred, bool LookThroughTrunc=true, bool AllowNonZeroC=false)
Decompose an icmp into the form ((X & Mask) pred C) if possible.
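A sketch that only asks whether the decomposition exists; field access on the returned struct is omitted so the example does not depend on its exact layout:

#include "llvm/Analysis/CmpInstAnalysis.h"

// True if `icmp Pred LHS, RHS` can be rewritten as ((X & Mask) pred C).
bool foldsToBitTest(llvm::Value *LHS, llvm::Value *RHS,
                    llvm::CmpInst::Predicate Pred) {
  return llvm::decomposeBitTestICmp(LHS, RHS, Pred).has_value();
}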
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
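A sketch that combines this query with the KnownBits accessors listed further down; the bit width is taken from the value's scalar type, so an integer value is assumed:

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/KnownBits.h"

// True if at least N low bits of the integer value V are known to be zero,
// e.g. because V is left-shifted or masked.
bool hasAlignedLowBits(const llvm::Value *V, const llvm::DataLayout &DL,
                       unsigned N) {
  llvm::KnownBits Known(V->getType()->getScalarSizeInBits());
  llvm::computeKnownBits(V, Known, DL);
  return Known.countMinTrailingZeros() >= N;
}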
DWARFExpression::Operation Op
constexpr unsigned BitWidth
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
std::optional< std::pair< CmpPredicate, Constant * > > getFlippedStrictnessPredicateAndConstant(CmpPredicate Pred, Constant *C)
Convert an integer comparison with a constant RHS into an equivalent form with the strictness flipped...
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer list are equal or the list is empty.
bool isKnownNeverNaN(const Value *V, unsigned Depth, const SimplifyQuery &SQ)
Return true if the floating-point scalar value is not a NaN or if the floating-point vector value has...
bool isKnownPositive(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be positive (i.e. non-zero and non-negative).
bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be non-negative.
std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
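Sketch: given two i1 conditions, the tri-state result distinguishes "implied true", "implied false", and "unknown"; the wrapper name is invented:

#include "llvm/Analysis/ValueTracking.h"
#include <optional>

// If A being true forces B (e.g. `x > 10` implies `x > 5`), this returns true;
// std::nullopt means the implication could not be established either way.
std::optional<bool> impliedWhenATrue(const llvm::Value *A, const llvm::Value *B,
                                     const llvm::DataLayout &DL) {
  return llvm::isImpliedCondition(A, B, DL, /*LHSIsTrue=*/true);
}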
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static constexpr roundingMode rmNearestTiesToEven
static constexpr roundingMode rmTowardZero
This callback is used in conjunction with PointerMayBeCaptured.
Represent subnormal handling kind for floating point instruction inputs and outputs.
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
@ PositiveZero
Denormals are flushed to positive zero.
static constexpr DenormalMode getIEEE()
bool isNonNegative() const
Returns true if this value is known to be non-negative.
bool isZero() const
Returns true if value is all zero.
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
APInt getSignedMaxValue() const
Return the maximal signed value possible given these KnownBits.
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
unsigned getBitWidth() const
Get the bit width of this value.
bool isConstant() const
Returns true if we know the value of all bits.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
APInt getMinValue() const
Return the minimal unsigned value possible given these KnownBits.
bool isStrictlyPositive() const
Returns true if this value is known to be positive.
bool isNegative() const
Returns true if this value is known to be negative.
unsigned countMinPopulation() const
Returns the number of bits known to be one.
APInt getSignedMinValue() const
Return the minimal signed value possible given these KnownBits.
const APInt & getConstant() const
Returns the value when all bits have a known value.
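A small sketch of the KnownBits accessors above on a hand-built 8-bit value whose bits 0, 1 and 7 are known to be zero; the concrete bit choices are illustrative:

#include "llvm/Support/KnownBits.h"
#include <cassert>

void knownBitsExample() {
  llvm::KnownBits Known(8);
  Known.Zero.setBit(0);
  Known.Zero.setBit(1);
  Known.Zero.setBit(7);
  assert(Known.countMinTrailingZeros() == 2); // bits 0 and 1 are zero
  assert(Known.isNonNegative());              // sign bit (bit 7) is zero
  assert(Known.getMaxValue() == 0x7C);        // only bits 2..6 can be one
  assert(Known.countMinLeadingZeros() == 1);  // only bit 7 is known on top
}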
SelectPatternFlavor Flavor
static bool isMinOrMax(SelectPatternFlavor SPF)
Return true if SPF is a min or max pattern, i.e. any flavor other than SPF_UNKNOWN, SPF_ABS and SPF_NABS.
SimplifyQuery getWithInstruction(const Instruction *I) const
SimplifyQuery getWithoutDomCondCache() const
A MapVector that performs no allocations if smaller than a certain size.