78#include "llvm/IR/IntrinsicsAArch64.h"
79#include "llvm/IR/IntrinsicsAMDGPU.h"
80#include "llvm/IR/IntrinsicsWebAssembly.h"
111using namespace PatternMatch;
112using namespace SwitchCG;
114#define DEBUG_TYPE "isel"
122 cl::desc(
"Insert the experimental `assertalign` node."),
127 cl::desc(
"Generate low-precision inline sequences "
128 "for some float libcalls"),
134 cl::desc(
"Set the case probability threshold for peeling the case from a "
135 "switch statement. A value greater than 100 will void this "
155 const SDValue *Parts,
unsigned NumParts,
158 std::optional<CallingConv::ID>
CC);
167 unsigned NumParts,
MVT PartVT,
EVT ValueVT,
const Value *V,
169 std::optional<CallingConv::ID>
CC = std::nullopt,
170 std::optional<ISD::NodeType> AssertOp = std::nullopt) {
174 PartVT, ValueVT,
CC))
181 assert(NumParts > 0 &&
"No parts to assemble!");
192 unsigned RoundBits = PartBits * RoundParts;
193 EVT RoundVT = RoundBits == ValueBits ?
199 if (RoundParts > 2) {
203 PartVT, HalfVT, V, InChain);
214 if (RoundParts < NumParts) {
216 unsigned OddParts = NumParts - RoundParts;
219 OddVT, V, InChain,
CC);
236 assert(ValueVT ==
EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
247 !PartVT.
isVector() &&
"Unexpected split");
259 if (PartEVT == ValueVT)
263 ValueVT.
bitsLT(PartEVT)) {
276 if (ValueVT.
bitsLT(PartEVT)) {
281 Val = DAG.
getNode(*AssertOp,
DL, PartEVT, Val,
296 llvm::Attribute::StrictFP)) {
298 DAG.
getVTList(ValueVT, MVT::Other), InChain, Val,
310 if (PartEVT == MVT::x86mmx && ValueVT.
isInteger() &&
311 ValueVT.
bitsLT(PartEVT)) {
320 const Twine &ErrMsg) {
321 const Instruction *
I = dyn_cast_or_null<Instruction>(V);
325 if (
const CallInst *CI = dyn_cast<CallInst>(
I))
326 if (CI->isInlineAsm()) {
328 *CI, ErrMsg +
", possible invalid constraint for vector type"));
340 const SDValue *Parts,
unsigned NumParts,
343 std::optional<CallingConv::ID> CallConv) {
345 assert(NumParts > 0 &&
"No parts to assemble!");
346 const bool IsABIRegCopy = CallConv.has_value();
355 unsigned NumIntermediates;
360 *DAG.
getContext(), *CallConv, ValueVT, IntermediateVT,
361 NumIntermediates, RegisterVT);
365 NumIntermediates, RegisterVT);
368 assert(NumRegs == NumParts &&
"Part count doesn't match vector breakdown!");
370 assert(RegisterVT == PartVT &&
"Part type doesn't match vector breakdown!");
373 "Part type sizes don't match!");
377 if (NumIntermediates == NumParts) {
380 for (
unsigned i = 0; i != NumParts; ++i)
382 V, InChain, CallConv);
383 }
else if (NumParts > 0) {
386 assert(NumParts % NumIntermediates == 0 &&
387 "Must expand into a divisible number of parts!");
388 unsigned Factor = NumParts / NumIntermediates;
389 for (
unsigned i = 0; i != NumIntermediates; ++i)
391 IntermediateVT, V, InChain, CallConv);
406 DL, BuiltVectorTy, Ops);
412 if (PartEVT == ValueVT)
428 "Cannot narrow, it would be a lossy transformation");
434 if (PartEVT == ValueVT)
459 }
else if (ValueVT.
bitsLT(PartEVT)) {
468 *DAG.
getContext(), V,
"non-trivial scalar-to-vector conversion");
499 std::optional<CallingConv::ID> CallConv);
506 unsigned NumParts,
MVT PartVT,
const Value *V,
507 std::optional<CallingConv::ID> CallConv = std::nullopt,
521 unsigned OrigNumParts = NumParts;
523 "Copying to an illegal type!");
529 EVT PartEVT = PartVT;
530 if (PartEVT == ValueVT) {
531 assert(NumParts == 1 &&
"No-op copy with multiple parts!");
540 assert(NumParts == 1 &&
"Do not know what to promote to!");
551 "Unknown mismatch!");
553 Val = DAG.
getNode(ExtendKind,
DL, ValueVT, Val);
554 if (PartVT == MVT::x86mmx)
559 assert(NumParts == 1 && PartEVT != ValueVT);
565 "Unknown mismatch!");
568 if (PartVT == MVT::x86mmx)
575 "Failed to tile the value with PartVT!");
578 if (PartEVT != ValueVT) {
580 "scalar-to-vector conversion failed");
589 if (NumParts & (NumParts - 1)) {
592 "Do not know what to expand to!");
594 unsigned RoundBits = RoundParts * PartBits;
595 unsigned OddParts = NumParts - RoundParts;
604 std::reverse(Parts + RoundParts, Parts + NumParts);
606 NumParts = RoundParts;
618 for (
unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
619 for (
unsigned i = 0; i < NumParts; i += StepSize) {
620 unsigned ThisBits = StepSize * PartBits / 2;
623 SDValue &Part1 = Parts[i+StepSize/2];
630 if (ThisBits == PartBits && ThisVT != PartVT) {
638 std::reverse(Parts, Parts + OrigNumParts);
655 if (ElementCount::isKnownLE(PartNumElts, ValueNumElts) ||
660 if (ValueEVT == MVT::bf16 && PartEVT == MVT::f16) {
662 "Cannot widen to illegal type");
665 }
else if (PartEVT != ValueEVT) {
680 Ops.
append((PartNumElts - ValueNumElts).getFixedValue(), EltUndef);
691 std::optional<CallingConv::ID> CallConv) {
695 const bool IsABIRegCopy = CallConv.has_value();
698 EVT PartEVT = PartVT;
699 if (PartEVT == ValueVT) {
718 TargetLowering::TypeWidenVector) {
745 "lossy conversion of vector to scalar type");
760 unsigned NumIntermediates;
764 *DAG.
getContext(), *CallConv, ValueVT, IntermediateVT, NumIntermediates,
769 NumIntermediates, RegisterVT);
772 assert(NumRegs == NumParts &&
"Part count doesn't match vector breakdown!");
774 assert(RegisterVT == PartVT &&
"Part type doesn't match vector breakdown!");
777 "Mixing scalable and fixed vectors when copying in parts");
779 std::optional<ElementCount> DestEltCnt;
789 if (ValueVT == BuiltVectorTy) {
813 for (
unsigned i = 0; i != NumIntermediates; ++i) {
828 if (NumParts == NumIntermediates) {
831 for (
unsigned i = 0; i != NumParts; ++i)
833 }
else if (NumParts > 0) {
836 assert(NumIntermediates != 0 &&
"division by zero");
837 assert(NumParts % NumIntermediates == 0 &&
838 "Must expand into a divisible number of parts!");
839 unsigned Factor = NumParts / NumIntermediates;
840 for (
unsigned i = 0; i != NumIntermediates; ++i)
847 EVT valuevt, std::optional<CallingConv::ID>
CC)
848 : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs),
849 RegCount(1, regs.
size()), CallConv(
CC) {}
853 std::optional<CallingConv::ID>
CC) {
867 for (
unsigned i = 0; i != NumRegs; ++i)
868 Regs.push_back(Reg + i);
869 RegVTs.push_back(RegisterVT);
871 Reg = Reg.id() + NumRegs;
898 for (
unsigned i = 0; i != NumRegs; ++i) {
904 *Glue =
P.getValue(2);
907 Chain =
P.getValue(1);
936 EVT FromVT(MVT::Other);
940 }
else if (NumSignBits > 1) {
948 assert(FromVT != MVT::Other);
954 RegisterVT, ValueVT, V, Chain,
CallConv);
970 unsigned NumRegs =
Regs.size();
984 NumParts, RegisterVT, V,
CallConv, ExtendKind);
990 for (
unsigned i = 0; i != NumRegs; ++i) {
1002 if (NumRegs == 1 || Glue)
1013 Chain = Chains[NumRegs-1];
1019 unsigned MatchingIdx,
const SDLoc &dl,
1021 std::vector<SDValue> &Ops)
const {
1026 Flag.setMatchingOp(MatchingIdx);
1035 Flag.setRegClass(RC->
getID());
1046 "No 1:1 mapping from clobbers to regs?");
1049 for (
unsigned I = 0, E =
ValueVTs.size();
I != E; ++
I) {
1054 "If we clobbered the stack pointer, MFI should know about it.");
1063 for (
unsigned i = 0; i != NumRegs; ++i) {
1064 assert(Reg <
Regs.size() &&
"Mismatch in # registers expected");
1065 unsigned TheReg =
Regs[Reg++];
1066 Ops.push_back(DAG.
getRegister(TheReg, RegisterVT));
1076 unsigned RegCount = std::get<0>(CountAndVT);
1077 MVT RegisterVT = std::get<1>(CountAndVT);
1101 UnusedArgNodeMap.clear();
1103 PendingExports.clear();
1104 PendingConstrainedFP.clear();
1105 PendingConstrainedFPStrict.clear();
1113 DanglingDebugInfoMap.clear();
1120 if (Pending.
empty())
1126 unsigned i = 0, e = Pending.
size();
1127 for (; i != e; ++i) {
1129 if (Pending[i].
getNode()->getOperand(0) == Root)
1137 if (Pending.
size() == 1)
1156 PendingConstrainedFP.size() +
1157 PendingConstrainedFPStrict.size());
1159 PendingConstrainedFP.end());
1160 PendingLoads.append(PendingConstrainedFPStrict.begin(),
1161 PendingConstrainedFPStrict.end());
1162 PendingConstrainedFP.clear();
1163 PendingConstrainedFPStrict.clear();
1170 PendingExports.append(PendingConstrainedFPStrict.begin(),
1171 PendingConstrainedFPStrict.end());
1172 PendingConstrainedFPStrict.clear();
1173 return updateRoot(PendingExports);
1180 assert(Variable &&
"Missing variable");
1187 <<
"dbg_declare: Dropping debug info (bad/undef/unused-arg address)\n");
1194 if (!
N.getNode() && isa<Argument>(
Address))
1202 auto *FINode = dyn_cast<FrameIndexSDNode>(
N.getNode());
1203 if (IsParameter && FINode) {
1206 true,
DL, SDNodeOrder);
1207 }
else if (isa<Argument>(
Address)) {
1211 FuncArgumentDbgValueKind::Declare,
N);
1215 true,
DL, SDNodeOrder);
1222 FuncArgumentDbgValueKind::Declare,
N)) {
1224 <<
" (could not emit func-arg dbg_value)\n");
1235 for (
auto It = FnVarLocs->locs_begin(&
I),
End = FnVarLocs->locs_end(&
I);
1237 auto *Var = FnVarLocs->getDILocalVariable(It->VariableID);
1239 if (It->Values.isKillLocation(It->Expr)) {
1245 It->Values.hasArgList())) {
1248 FnVarLocs->getDILocalVariable(It->VariableID),
1249 It->Expr, Vals.
size() > 1, It->DL, SDNodeOrder);
1265 for (
DbgRecord &DR :
I.getDbgRecordRange()) {
1267 assert(DLR->getLabel() &&
"Missing label");
1269 DAG.
getDbgLabel(DLR->getLabel(), DLR->getDebugLoc(), SDNodeOrder);
1274 if (SkipDbgVariableRecords)
1284 LLVM_DEBUG(
dbgs() <<
"SelectionDAG visiting dbg_declare: " << DVR
1293 if (Values.
empty()) {
1302 [](
Value *V) {
return !V || isa<UndefValue>(V); })) {
1310 SDNodeOrder, IsVariadic)) {
1321 if (
I.isTerminator()) {
1322 HandlePHINodesInSuccessorBlocks(
I.getParent());
1326 if (!isa<DbgInfoIntrinsic>(
I))
1332 bool NodeInserted =
false;
1333 std::unique_ptr<SelectionDAG::DAGNodeInsertedListener> InsertedListener;
1334 MDNode *PCSectionsMD =
I.getMetadata(LLVMContext::MD_pcsections);
1335 MDNode *MMRA =
I.getMetadata(LLVMContext::MD_mmra);
1336 if (PCSectionsMD || MMRA) {
1337 InsertedListener = std::make_unique<SelectionDAG::DAGNodeInsertedListener>(
1338 DAG, [&](
SDNode *) { NodeInserted =
true; });
1344 !isa<GCStatepointInst>(
I))
1348 if (PCSectionsMD || MMRA) {
1349 auto It = NodeMap.find(&
I);
1350 if (It != NodeMap.end()) {
1355 }
else if (NodeInserted) {
1358 errs() <<
"warning: loosing !pcsections and/or !mmra metadata ["
1359 <<
I.getModule()->getName() <<
"]\n";
1368void SelectionDAGBuilder::visitPHI(
const PHINode &) {
1378#define HANDLE_INST(NUM, OPCODE, CLASS) \
1379 case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
1380#include "llvm/IR/Instruction.def"
1392 for (
const Value *V : Values) {
1417 DanglingDebugInfoMap[Values[0]].emplace_back(Var, Expr,
DL, Order);
1422 auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
1423 DIVariable *DanglingVariable = DDI.getVariable();
1425 if (DanglingVariable == Variable && Expr->
fragmentsOverlap(DanglingExpr)) {
1427 << printDDI(
nullptr, DDI) <<
"\n");
1433 for (
auto &DDIMI : DanglingDebugInfoMap) {
1434 DanglingDebugInfoVector &DDIV = DDIMI.second;
1438 for (
auto &DDI : DDIV)
1439 if (isMatchingDbgValue(DDI))
1442 erase_if(DDIV, isMatchingDbgValue);
1450 auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
1451 if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
1454 DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
1455 for (
auto &DDI : DDIV) {
1458 unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
1462 "Expected inlined-at fields to agree");
1471 if (!EmitFuncArgumentDbgValue(V, Variable, Expr,
DL,
1472 FuncArgumentDbgValueKind::Value, Val)) {
1474 << printDDI(V, DDI) <<
"\n");
1481 <<
"changing SDNodeOrder from " << DbgSDNodeOrder <<
" to "
1482 << ValSDNodeOrder <<
"\n");
1483 SDV = getDbgValue(Val, Variable, Expr,
DL,
1484 std::max(DbgSDNodeOrder, ValSDNodeOrder));
1489 <<
" in EmitFuncArgumentDbgValue\n");
1491 LLVM_DEBUG(
dbgs() <<
"Dropping debug info for " << printDDI(V, DDI)
1503 DanglingDebugInfo &DDI) {
1508 const Value *OrigV = V;
1512 unsigned SDOrder = DDI.getSDNodeOrder();
1516 bool StackValue =
true;
1525 while (isa<Instruction>(V)) {
1526 const Instruction &VAsInst = *cast<const Instruction>(V);
1541 if (!AdditionalValues.
empty())
1551 dbgs() <<
"Salvaged debug location info for:\n " << *Var <<
"\n"
1552 << *OrigV <<
"\nBy stripping back to:\n " << *V <<
"\n");
1560 assert(OrigV &&
"V shouldn't be null");
1565 << printDDI(OrigV, DDI) <<
"\n");
1582 unsigned Order,
bool IsVariadic) {
1587 if (visitEntryValueDbgValue(Values, Var, Expr, DbgLoc))
1592 for (
const Value *V : Values) {
1594 if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V) ||
1595 isa<ConstantPointerNull>(V)) {
1601 if (
auto *CE = dyn_cast<ConstantExpr>(V))
1602 if (CE->getOpcode() == Instruction::IntToPtr) {
1609 if (
const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1620 if (!
N.getNode() && isa<Argument>(V))
1621 N = UnusedArgNodeMap[V];
1626 EmitFuncArgumentDbgValue(V, Var, Expr, DbgLoc,
1627 FuncArgumentDbgValueKind::Value,
N))
1629 if (
auto *FISDN = dyn_cast<FrameIndexSDNode>(
N.getNode())) {
1654 bool IsParamOfFunc =
1664 unsigned Reg = VMI->second;
1668 V->getType(), std::nullopt);
1674 unsigned BitsToDescribe = 0;
1676 BitsToDescribe = *VarSize;
1678 BitsToDescribe = Fragment->SizeInBits;
1681 if (
Offset >= BitsToDescribe)
1684 unsigned RegisterSize = RegAndSize.second;
1685 unsigned FragmentSize = (
Offset + RegisterSize > BitsToDescribe)
1686 ? BitsToDescribe -
Offset
1689 Expr,
Offset, FragmentSize);
1693 Var, *FragmentExpr, RegAndSize.first,
false, DbgLoc, Order);
1711 false, DbgLoc, Order, IsVariadic);
1718 for (
auto &Pair : DanglingDebugInfoMap)
1719 for (
auto &DDI : Pair.second)
1751 if (
N.getNode())
return N;
1793 if (
const Constant *
C = dyn_cast<Constant>(V)) {
1805 getValue(CPA->getAddrDiscriminator()),
1806 getValue(CPA->getDiscriminator()));
1809 if (isa<ConstantPointerNull>(
C)) {
1810 unsigned AS = V->getType()->getPointerAddressSpace();
1818 if (
const ConstantFP *CFP = dyn_cast<ConstantFP>(
C))
1821 if (isa<UndefValue>(
C) && !V->getType()->isAggregateType())
1825 visit(CE->getOpcode(), *CE);
1827 assert(N1.
getNode() &&
"visit didn't populate the NodeMap!");
1831 if (isa<ConstantStruct>(
C) || isa<ConstantArray>(
C)) {
1833 for (
const Use &U :
C->operands()) {
1839 for (
unsigned i = 0, e = Val->
getNumValues(); i != e; ++i)
1840 Constants.push_back(
SDValue(Val, i));
1847 dyn_cast<ConstantDataSequential>(
C)) {
1849 for (
unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1853 for (
unsigned i = 0, e = Val->
getNumValues(); i != e; ++i)
1857 if (isa<ArrayType>(CDS->getType()))
1862 if (
C->getType()->isStructTy() ||
C->getType()->isArrayTy()) {
1863 assert((isa<ConstantAggregateZero>(
C) || isa<UndefValue>(
C)) &&
1864 "Unknown struct or array constant!");
1868 unsigned NumElts = ValueVTs.
size();
1872 for (
unsigned i = 0; i != NumElts; ++i) {
1873 EVT EltVT = ValueVTs[i];
1874 if (isa<UndefValue>(
C))
1888 if (
const auto *Equiv = dyn_cast<DSOLocalEquivalent>(
C))
1889 return getValue(Equiv->getGlobalValue());
1891 if (
const auto *
NC = dyn_cast<NoCFIValue>(
C))
1894 if (VT == MVT::aarch64svcount) {
1895 assert(
C->isNullValue() &&
"Can only zero this target type!");
1901 assert(
C->isNullValue() &&
"Can only zero this target type!");
1912 VectorType *VecTy = cast<VectorType>(V->getType());
1918 unsigned NumElements = cast<FixedVectorType>(VecTy)->getNumElements();
1919 for (
unsigned i = 0; i != NumElements; ++i)
1925 if (isa<ConstantAggregateZero>(
C)) {
1943 if (
const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1952 if (
const Instruction *Inst = dyn_cast<Instruction>(V)) {
1956 Inst->getType(), std::nullopt);
1964 if (
const auto *BB = dyn_cast<BasicBlock>(V))
1970void SelectionDAGBuilder::visitCatchPad(
const CatchPadInst &
I) {
1979 if (IsMSVCCXX || IsCoreCLR)
2006 Value *ParentPad =
I.getCatchSwitchParentPad();
2008 if (isa<ConstantTokenNone>(ParentPad))
2011 SuccessorColor = cast<Instruction>(ParentPad)->
getParent();
2012 assert(SuccessorColor &&
"No parent funclet for catchret!");
2014 assert(SuccessorColorMBB &&
"No MBB for SuccessorColor!");
2023void SelectionDAGBuilder::visitCleanupPad(
const CleanupPadInst &CPI) {
2067 if (isa<CleanupPadInst>(Pad)) {
2069 UnwindDests.emplace_back(FuncInfo.
getMBB(EHPadBB), Prob);
2070 UnwindDests.back().first->setIsEHScopeEntry();
2072 }
else if (
const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
2075 for (
const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2076 UnwindDests.emplace_back(FuncInfo.
getMBB(CatchPadBB), Prob);
2077 UnwindDests.back().first->setIsEHScopeEntry();
2108 assert(UnwindDests.size() <= 1 &&
2109 "There should be at most one unwind destination for wasm");
2116 if (isa<LandingPadInst>(Pad)) {
2118 UnwindDests.emplace_back(FuncInfo.
getMBB(EHPadBB), Prob);
2120 }
else if (isa<CleanupPadInst>(Pad)) {
2123 UnwindDests.emplace_back(FuncInfo.
getMBB(EHPadBB), Prob);
2124 UnwindDests.
back().first->setIsEHScopeEntry();
2125 UnwindDests.back().first->setIsEHFuncletEntry();
2127 }
else if (
const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
2129 for (
const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2130 UnwindDests.emplace_back(FuncInfo.
getMBB(CatchPadBB), Prob);
2132 if (IsMSVCCXX || IsCoreCLR)
2133 UnwindDests.back().first->setIsEHFuncletEntry();
2135 UnwindDests.back().first->setIsEHScopeEntry();
2137 NewEHPadBB = CatchSwitch->getUnwindDest();
2143 if (BPI && NewEHPadBB)
2145 EHPadBB = NewEHPadBB;
2152 auto UnwindDest =
I.getUnwindDest();
2159 for (
auto &UnwindDest : UnwindDests) {
2160 UnwindDest.first->setIsEHPad();
2161 addSuccessorWithProb(
FuncInfo.
MBB, UnwindDest.first, UnwindDest.second);
2173void SelectionDAGBuilder::visitCatchSwitch(
const CatchSwitchInst &CSI) {
2177void SelectionDAGBuilder::visitRet(
const ReturnInst &
I) {
2191 if (
I.getParent()->getTerminatingDeoptimizeCall()) {
2211 unsigned NumValues = ValueVTs.
size();
2214 Align BaseAlign =
DL.getPrefTypeAlign(
I.getOperand(0)->getType());
2215 for (
unsigned i = 0; i != NumValues; ++i) {
2222 if (MemVTs[i] != ValueVTs[i])
2232 MVT::Other, Chains);
2233 }
else if (
I.getNumOperands() != 0) {
2236 unsigned NumValues = ValueVTs.
size();
2240 const Function *
F =
I.getParent()->getParent();
2243 I.getOperand(0)->getType(),
F->getCallingConv(),
2247 if (
F->getAttributes().hasRetAttr(Attribute::SExt))
2249 else if (
F->getAttributes().hasRetAttr(Attribute::ZExt))
2253 bool RetInReg =
F->getAttributes().hasRetAttr(Attribute::InReg);
2255 for (
unsigned j = 0;
j != NumValues; ++
j) {
2256 EVT VT = ValueVTs[
j];
2268 &Parts[0], NumParts, PartVT, &
I,
CC, ExtendKind);
2275 if (
I.getOperand(0)->getType()->isPointerTy()) {
2277 Flags.setPointerAddrSpace(
2278 cast<PointerType>(
I.getOperand(0)->getType())->getAddressSpace());
2281 if (NeedsRegBlock) {
2282 Flags.setInConsecutiveRegs();
2283 if (j == NumValues - 1)
2284 Flags.setInConsecutiveRegsLast();
2292 else if (
F->getAttributes().hasRetAttr(Attribute::NoExt))
2295 for (
unsigned i = 0; i < NumParts; ++i) {
2308 const Function *
F =
I.getParent()->getParent();
2310 F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
2313 Flags.setSwiftError();
2332 "LowerReturn didn't return a valid chain!");
2343 if (V->getType()->isEmptyTy())
2348 assert((!V->use_empty() || isa<CallBrInst>(V)) &&
2349 "Unused value assigned virtual registers!");
2359 if (!isa<Instruction>(V) && !isa<Argument>(V))
return;
2372 if (
const Instruction *VI = dyn_cast<Instruction>(V)) {
2374 if (VI->getParent() == FromBB)
2383 if (isa<Argument>(V)) {
2400 const BasicBlock *SrcBB = Src->getBasicBlock();
2401 const BasicBlock *DstBB = Dst->getBasicBlock();
2405 auto SuccSize = std::max<uint32_t>(
succ_size(SrcBB), 1);
2415 Src->addSuccessorWithoutProb(Dst);
2418 Prob = getEdgeProbability(Src, Dst);
2419 Src->addSuccessor(Dst, Prob);
2425 return I->getParent() == BB;
2445 if (
const CmpInst *BOp = dyn_cast<CmpInst>(
Cond)) {
2449 if (CurBB == SwitchBB ||
2455 InvertCond ? IC->getInversePredicate() : IC->getPredicate();
2460 InvertCond ? FC->getInversePredicate() : FC->getPredicate();
2466 CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1),
nullptr,
2468 SL->SwitchCases.push_back(CB);
2477 SL->SwitchCases.push_back(CB);
2485 unsigned Depth = 0) {
2490 auto *
I = dyn_cast<Instruction>(V);
2494 if (Necessary !=
nullptr) {
2497 if (Necessary->contains(
I))
2505 for (
unsigned OpIdx = 0, E =
I->getNumOperands(); OpIdx < E; ++OpIdx)
2516 if (
I.getNumSuccessors() != 2)
2519 if (!
I.isConditional())
2531 if (BPI !=
nullptr) {
2537 std::optional<bool> Likely;
2540 else if (BPI->
isEdgeHot(
I.getParent(), IfFalse))
2544 if (Opc == (*Likely ? Instruction::And : Instruction::Or))
2556 if (CostThresh <= 0)
2570 if (
const auto *RhsI = dyn_cast<Instruction>(Rhs))
2581 Value *BrCond =
I.getCondition();
2582 auto ShouldCountInsn = [&RhsDeps, &BrCond](
const Instruction *Ins) {
2583 for (
const auto *U : Ins->users()) {
2585 if (
auto *UIns = dyn_cast<Instruction>(U))
2586 if (UIns != BrCond && !RhsDeps.
contains(UIns))
2599 for (
unsigned PruneIters = 0; PruneIters < MaxPruneIters; ++PruneIters) {
2601 for (
const auto &InsPair : RhsDeps) {
2602 if (!ShouldCountInsn(InsPair.first)) {
2603 ToDrop = InsPair.first;
2607 if (ToDrop ==
nullptr)
2609 RhsDeps.erase(ToDrop);
2612 for (
const auto &InsPair : RhsDeps) {
2620 if (CostOfIncluding > CostThresh)
2646 const Value *BOpOp0, *BOpOp1;
2660 if (BOpc == Instruction::And)
2661 BOpc = Instruction::Or;
2662 else if (BOpc == Instruction::Or)
2663 BOpc = Instruction::And;
2669 bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->
hasOneUse();
2674 TProb, FProb, InvertCond);
2684 if (Opc == Instruction::Or) {
2705 auto NewTrueProb = TProb / 2;
2706 auto NewFalseProb = TProb / 2 + FProb;
2709 NewFalseProb, InvertCond);
2716 Probs[1], InvertCond);
2718 assert(Opc == Instruction::And &&
"Unknown merge op!");
2738 auto NewTrueProb = TProb + FProb / 2;
2739 auto NewFalseProb = FProb / 2;
2742 NewFalseProb, InvertCond);
2749 Probs[1], InvertCond);
2758 if (Cases.size() != 2)
return true;
2762 if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
2763 Cases[0].CmpRHS == Cases[1].CmpRHS) ||
2764 (Cases[0].CmpRHS == Cases[1].CmpLHS &&
2765 Cases[0].CmpLHS == Cases[1].CmpRHS)) {
2771 if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
2772 Cases[0].
CC == Cases[1].
CC &&
2773 isa<Constant>(Cases[0].CmpRHS) &&
2774 cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
2775 if (Cases[0].
CC ==
ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
2777 if (Cases[0].
CC ==
ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
2784void SelectionDAGBuilder::visitBr(
const BranchInst &
I) {
2790 if (
I.isUnconditional()) {
2796 if (Succ0MBB != NextBlock(BrMBB) ||
2809 const Value *CondVal =
I.getCondition();
2829 bool IsUnpredictable =
I.hasMetadata(LLVMContext::MD_unpredictable);
2830 const Instruction *BOp = dyn_cast<Instruction>(CondVal);
2834 const Value *BOp0, *BOp1;
2837 Opcode = Instruction::And;
2839 Opcode = Instruction::Or;
2847 Opcode, BOp0, BOp1))) {
2849 getEdgeProbability(BrMBB, Succ0MBB),
2850 getEdgeProbability(BrMBB, Succ1MBB),
2855 assert(
SL->SwitchCases[0].ThisBB == BrMBB &&
"Unexpected lowering!");
2859 for (
unsigned i = 1, e =
SL->SwitchCases.size(); i != e; ++i) {
2866 SL->SwitchCases.erase(
SL->SwitchCases.begin());
2872 for (
unsigned i = 1, e =
SL->SwitchCases.size(); i != e; ++i)
2875 SL->SwitchCases.clear();
2881 nullptr, Succ0MBB, Succ1MBB, BrMBB,
getCurSDLoc(),
2902 if (CB.
TrueBB != NextBlock(SwitchBB)) {
2944 if (cast<ConstantInt>(CB.
CmpLHS)->isMinValue(
true)) {
2965 if (CB.
TrueBB == NextBlock(SwitchBB)) {
2990 assert(JT.SL &&
"Should set SDLoc for SelectionDAG!");
2991 assert(JT.Reg &&
"Should lower JT Header first!");
2996 Index.getValue(1), Table, Index);
3005 assert(JT.SL &&
"Should set SDLoc for SelectionDAG!");
3006 const SDLoc &dl = *JT.SL;
3027 JT.Reg = JumpTableReg;
3039 MVT::Other, CopyTo, CMP,
3043 if (JT.MBB != NextBlock(SwitchBB))
3050 if (JT.MBB != NextBlock(SwitchBB))
3078 if (PtrTy != PtrMemTy)
3126 Entry.Node = GuardVal;
3128 if (GuardCheckFn->hasParamAttribute(0, Attribute::AttrKind::InReg))
3129 Entry.IsInReg =
true;
3130 Args.push_back(Entry);
3136 getValue(GuardCheckFn), std::move(Args));
3138 std::pair<SDValue, SDValue> Result = TLI.
LowerCallTo(CLI);
3152 Guard =
DAG.
getLoad(PtrMemTy, dl, Chain, GuardPtr,
3214 bool UsePtrType =
false;
3218 for (
unsigned i = 0, e =
B.Cases.size(); i != e; ++i)
3238 if (!
B.FallthroughUnreachable)
3239 addSuccessorWithProb(SwitchBB,
B.Default,
B.DefaultProb);
3240 addSuccessorWithProb(SwitchBB,
MBB,
B.Prob);
3244 if (!
B.FallthroughUnreachable) {
3257 if (
MBB != NextBlock(SwitchBB))
3275 if (PopCount == 1) {
3282 }
else if (PopCount == BB.
Range) {
3301 addSuccessorWithProb(SwitchBB,
B.TargetBB,
B.ExtraProb);
3303 addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
3314 if (NextMBB != NextBlock(SwitchBB))
3321void SelectionDAGBuilder::visitInvoke(
const InvokeInst &
I) {
3332 assert(!
I.hasOperandBundlesOtherThan(
3333 {LLVMContext::OB_deopt, LLVMContext::OB_gc_transition,
3334 LLVMContext::OB_gc_live, LLVMContext::OB_funclet,
3335 LLVMContext::OB_cfguardtarget, LLVMContext::OB_ptrauth,
3336 LLVMContext::OB_clang_arc_attachedcall}) &&
3337 "Cannot lower invokes with arbitrary operand bundles yet!");
3339 const Value *Callee(
I.getCalledOperand());
3340 const Function *Fn = dyn_cast<Function>(Callee);
3341 if (isa<InlineAsm>(Callee))
3342 visitInlineAsm(
I, EHPadBB);
3347 case Intrinsic::donothing:
3349 case Intrinsic::seh_try_begin:
3350 case Intrinsic::seh_scope_begin:
3351 case Intrinsic::seh_try_end:
3352 case Intrinsic::seh_scope_end:
3358 case Intrinsic::experimental_patchpoint_void:
3359 case Intrinsic::experimental_patchpoint:
3360 visitPatchpoint(
I, EHPadBB);
3362 case Intrinsic::experimental_gc_statepoint:
3365 case Intrinsic::wasm_rethrow: {
3380 }
else if (
I.hasDeoptState()) {
3396 if (!isa<GCStatepointInst>(
I)) {
3408 addSuccessorWithProb(InvokeMBB, Return);
3409 for (
auto &UnwindDest : UnwindDests) {
3410 UnwindDest.first->setIsEHPad();
3411 addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
3420void SelectionDAGBuilder::visitCallBr(
const CallBrInst &
I) {
3425 assert(!
I.hasOperandBundlesOtherThan(
3426 {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
3427 "Cannot lower callbrs with arbitrary operand bundles yet!");
3429 assert(
I.isInlineAsm() &&
"Only know how to handle inlineasm callbr");
3435 Dests.
insert(
I.getDefaultDest());
3440 for (
unsigned i = 0, e =
I.getNumIndirectDests(); i < e; ++i) {
3443 Target->setIsInlineAsmBrIndirectTarget();
3444 Target->setMachineBlockAddressTaken();
3445 Target->setLabelMustBeEmitted();
3447 if (Dests.
insert(Dest).second)
3458void SelectionDAGBuilder::visitResume(
const ResumeInst &RI) {
3459 llvm_unreachable(
"SelectionDAGBuilder shouldn't visit resume instructions!");
3462void SelectionDAGBuilder::visitLandingPad(
const LandingPadInst &LP) {
3464 "Call to landingpad not in landing pad!");
3484 assert(ValueVTs.
size() == 2 &&
"Only two-valued landingpads are supported");
3514 if (JTB.first.HeaderBB ==
First)
3515 JTB.first.HeaderBB =
Last;
3528 for (
unsigned i = 0, e =
I.getNumSuccessors(); i != e; ++i) {
3530 bool Inserted =
Done.insert(BB).second;
3535 addSuccessorWithProb(IndirectBrMBB, Succ);
3549 if (
const CallInst *Call = dyn_cast_or_null<CallInst>(
I.getPrevNode());
3550 Call &&
Call->doesNotReturn()) {
3554 if (
Call->isNonContinuableTrap())
3561void SelectionDAGBuilder::visitUnary(
const User &
I,
unsigned Opcode) {
3563 if (
auto *FPOp = dyn_cast<FPMathOperator>(&
I))
3564 Flags.copyFMF(*FPOp);
3572void SelectionDAGBuilder::visitBinary(
const User &
I,
unsigned Opcode) {
3574 if (
auto *OFBinOp = dyn_cast<OverflowingBinaryOperator>(&
I)) {
3575 Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap());
3576 Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap());
3578 if (
auto *ExactOp = dyn_cast<PossiblyExactOperator>(&
I))
3579 Flags.setExact(ExactOp->isExact());
3580 if (
auto *DisjointOp = dyn_cast<PossiblyDisjointInst>(&
I))
3581 Flags.setDisjoint(DisjointOp->isDisjoint());
3582 if (
auto *FPOp = dyn_cast<FPMathOperator>(&
I))
3583 Flags.copyFMF(*FPOp);
3592void SelectionDAGBuilder::visitShift(
const User &
I,
unsigned Opcode) {
3601 if (!
I.getType()->isVectorTy() && Op2.
getValueType() != ShiftTy) {
3603 "Unexpected shift type");
3614 dyn_cast<const OverflowingBinaryOperator>(&
I)) {
3615 nuw = OFBinOp->hasNoUnsignedWrap();
3616 nsw = OFBinOp->hasNoSignedWrap();
3619 dyn_cast<const PossiblyExactOperator>(&
I))
3620 exact = ExactOp->isExact();
3623 Flags.setExact(exact);
3624 Flags.setNoSignedWrap(nsw);
3625 Flags.setNoUnsignedWrap(nuw);
3631void SelectionDAGBuilder::visitSDiv(
const User &
I) {
3636 Flags.setExact(isa<PossiblyExactOperator>(&
I) &&
3637 cast<PossiblyExactOperator>(&
I)->isExact());
3642void SelectionDAGBuilder::visitICmp(
const ICmpInst &
I) {
3661 Flags.setSameSign(
I.hasSameSign());
3669void SelectionDAGBuilder::visitFCmp(
const FCmpInst &
I) {
3675 auto *FPMO = cast<FPMathOperator>(&
I);
3680 Flags.copyFMF(*FPMO);
3692 return isa<SelectInst>(V);
3696void SelectionDAGBuilder::visitSelect(
const User &
I) {
3700 unsigned NumValues = ValueVTs.
size();
3701 if (NumValues == 0)
return;
3711 bool IsUnaryAbs =
false;
3712 bool Negate =
false;
3715 if (
auto *FPOp = dyn_cast<FPMathOperator>(&
I))
3716 Flags.copyFMF(*FPOp);
3718 Flags.setUnpredictable(
3719 cast<SelectInst>(
I).getMetadata(LLVMContext::MD_unpredictable));
3723 EVT VT = ValueVTs[0];
3735 bool UseScalarMinMax = VT.
isVector() &&
3744 switch (SPR.Flavor) {
3750 switch (SPR.NaNBehavior) {
3763 switch (SPR.NaNBehavior) {
3807 for (
unsigned i = 0; i != NumValues; ++i) {
3816 for (
unsigned i = 0; i != NumValues; ++i) {
3830void SelectionDAGBuilder::visitTrunc(
const User &
I) {
3836 if (
auto *Trunc = dyn_cast<TruncInst>(&
I)) {
3837 Flags.setNoSignedWrap(Trunc->hasNoSignedWrap());
3838 Flags.setNoUnsignedWrap(Trunc->hasNoUnsignedWrap());
3844void SelectionDAGBuilder::visitZExt(
const User &
I) {
3852 if (
auto *PNI = dyn_cast<PossiblyNonNegInst>(&
I))
3853 Flags.setNonNeg(PNI->hasNonNeg());
3858 if (
Flags.hasNonNeg() &&
3867void SelectionDAGBuilder::visitSExt(
const User &
I) {
3876void SelectionDAGBuilder::visitFPTrunc(
const User &
I) {
3887void SelectionDAGBuilder::visitFPExt(
const User &
I) {
3895void SelectionDAGBuilder::visitFPToUI(
const User &
I) {
3903void SelectionDAGBuilder::visitFPToSI(
const User &
I) {
3911void SelectionDAGBuilder::visitUIToFP(
const User &
I) {
3917 if (
auto *PNI = dyn_cast<PossiblyNonNegInst>(&
I))
3918 Flags.setNonNeg(PNI->hasNonNeg());
3923void SelectionDAGBuilder::visitSIToFP(
const User &
I) {
3931void SelectionDAGBuilder::visitPtrToInt(
const User &
I) {
3945void SelectionDAGBuilder::visitIntToPtr(
const User &
I) {
3957void SelectionDAGBuilder::visitBitCast(
const User &
I) {
3965 if (DestVT !=
N.getValueType())
3972 else if(
ConstantInt *
C = dyn_cast<ConstantInt>(
I.getOperand(0)))
3979void SelectionDAGBuilder::visitAddrSpaceCast(
const User &
I) {
3981 const Value *SV =
I.getOperand(0);
3986 unsigned DestAS =
I.getType()->getPointerAddressSpace();
3994void SelectionDAGBuilder::visitInsertElement(
const User &
I) {
4002 InVec, InVal, InIdx));
4005void SelectionDAGBuilder::visitExtractElement(
const User &
I) {
4015void SelectionDAGBuilder::visitShuffleVector(
const User &
I) {
4019 if (
auto *SVI = dyn_cast<ShuffleVectorInst>(&
I))
4020 Mask = SVI->getShuffleMask();
4022 Mask = cast<ConstantExpr>(
I).getShuffleMask();
4028 if (
all_of(Mask, [](
int Elem) {
return Elem == 0; }) &&
4044 unsigned MaskNumElts =
Mask.size();
4046 if (SrcNumElts == MaskNumElts) {
4052 if (SrcNumElts < MaskNumElts) {
4056 if (MaskNumElts % SrcNumElts == 0) {
4060 unsigned NumConcat = MaskNumElts / SrcNumElts;
4061 bool IsConcat =
true;
4063 for (
unsigned i = 0; i != MaskNumElts; ++i) {
4069 if ((
Idx % SrcNumElts != (i % SrcNumElts)) ||
4070 (ConcatSrcs[i / SrcNumElts] >= 0 &&
4071 ConcatSrcs[i / SrcNumElts] != (
int)(
Idx / SrcNumElts))) {
4076 ConcatSrcs[i / SrcNumElts] =
Idx / SrcNumElts;
4083 for (
auto Src : ConcatSrcs) {
4096 unsigned PaddedMaskNumElts =
alignTo(MaskNumElts, SrcNumElts);
4097 unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
4114 for (
unsigned i = 0; i != MaskNumElts; ++i) {
4116 if (
Idx >= (
int)SrcNumElts)
4117 Idx -= SrcNumElts - PaddedMaskNumElts;
4125 if (MaskNumElts != PaddedMaskNumElts)
4133 assert(SrcNumElts > MaskNumElts);
4137 int StartIdx[2] = {-1, -1};
4138 bool CanExtract =
true;
4139 for (
int Idx : Mask) {
4144 if (
Idx >= (
int)SrcNumElts) {
4153 if (NewStartIdx + MaskNumElts > SrcNumElts ||
4154 (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
4158 StartIdx[Input] = NewStartIdx;
4161 if (StartIdx[0] < 0 && StartIdx[1] < 0) {
4167 for (
unsigned Input = 0; Input < 2; ++Input) {
4168 SDValue &Src = Input == 0 ? Src1 : Src2;
4169 if (StartIdx[Input] < 0)
4179 for (
int &
Idx : MappedOps) {
4180 if (
Idx >= (
int)SrcNumElts)
4181 Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
4195 for (
int Idx : Mask) {
4201 SDValue &Src =
Idx < (int)SrcNumElts ? Src1 : Src2;
4202 if (
Idx >= (
int)SrcNumElts)
Idx -= SrcNumElts;
4216 const Value *Op0 =
I.getOperand(0);
4217 const Value *Op1 =
I.getOperand(1);
4218 Type *AggTy =
I.getType();
4220 bool IntoUndef = isa<UndefValue>(Op0);
4221 bool FromUndef = isa<UndefValue>(Op1);
4231 unsigned NumAggValues = AggValueVTs.
size();
4232 unsigned NumValValues = ValValueVTs.
size();
4236 if (!NumAggValues) {
4244 for (; i != LinearIndex; ++i)
4245 Values[i] = IntoUndef ?
DAG.
getUNDEF(AggValueVTs[i]) :
4250 for (; i != LinearIndex + NumValValues; ++i)
4251 Values[i] = FromUndef ?
DAG.
getUNDEF(AggValueVTs[i]) :
4255 for (; i != NumAggValues; ++i)
4256 Values[i] = IntoUndef ?
DAG.
getUNDEF(AggValueVTs[i]) :
4265 const Value *Op0 =
I.getOperand(0);
4267 Type *ValTy =
I.getType();
4268 bool OutOfUndef = isa<UndefValue>(Op0);
4276 unsigned NumValValues = ValValueVTs.
size();
4279 if (!NumValValues) {
4288 for (
unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
4289 Values[i - LinearIndex] =
4298void SelectionDAGBuilder::visitGetElementPtr(
const User &
I) {
4299 Value *Op0 =
I.getOperand(0);
4310 bool IsVectorGEP =
I.getType()->isVectorTy();
4312 IsVectorGEP ? cast<VectorType>(
I.getType())->getElementCount()
4315 if (IsVectorGEP && !
N.getValueType().isVector()) {
4323 const Value *
Idx = GTI.getOperand();
4324 if (
StructType *StTy = GTI.getStructTypeOrNull()) {
4325 unsigned Field = cast<Constant>(
Idx)->getUniqueInteger().getZExtValue();
4353 bool ElementScalable = ElementSize.
isScalable();
4357 const auto *
C = dyn_cast<Constant>(
Idx);
4358 if (
C && isa<VectorType>(
C->getType()))
4359 C =
C->getSplatValue();
4361 const auto *CI = dyn_cast_or_null<ConstantInt>(
C);
4362 if (CI && CI->isZero())
4364 if (CI && !ElementScalable) {
4379 Flags.setNoUnsignedWrap(
true);
4392 VectorElementCount);
4409 if (ElementScalable) {
4410 EVT VScaleTy =
N.getValueType().getScalarType();
4421 if (ElementMul != 1) {
4422 if (ElementMul.isPowerOf2()) {
4423 unsigned Amt = ElementMul.logBase2();
4454 if (PtrMemTy != PtrTy && !cast<GEPOperator>(
I).isInBounds())
4460void SelectionDAGBuilder::visitAlloca(
const AllocaInst &
I) {
4467 Type *Ty =
I.getAllocatedType();
4471 MaybeAlign Alignment = std::max(
DL.getPrefTypeAlign(Ty),
I.getAlign());
4495 if (*Alignment <= StackAlign)
4496 Alignment = std::nullopt;
4512 DAG.
getConstant(Alignment ? Alignment->value() : 0, dl, IntPtr)};
4528 if (!
I.hasMetadata(LLVMContext::MD_noundef))
4530 return I.getMetadata(LLVMContext::MD_range);
4534 if (
const auto *CB = dyn_cast<CallBase>(&
I)) {
4536 if (CB->hasRetAttr(Attribute::NoUndef))
4537 return CB->getRange();
4541 return std::nullopt;
4544void SelectionDAGBuilder::visitLoad(
const LoadInst &
I) {
4546 return visitAtomicLoad(
I);
4549 const Value *SV =
I.getOperand(0);
4553 if (
const Argument *Arg = dyn_cast<Argument>(SV)) {
4554 if (Arg->hasSwiftErrorAttr())
4555 return visitLoadFromSwiftError(
I);
4558 if (
const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
4559 if (Alloca->isSwiftError())
4560 return visitLoadFromSwiftError(
I);
4566 Type *Ty =
I.getType();
4570 unsigned NumValues = ValueVTs.
size();
4574 Align Alignment =
I.getAlign();
4577 bool isVolatile =
I.isVolatile();
4582 bool ConstantMemory =
false;
4595 ConstantMemory =
true;
4610 unsigned ChainI = 0;
4611 for (
unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4634 MMOFlags, AAInfo, Ranges);
4635 Chains[ChainI] =
L.getValue(1);
4637 if (MemVTs[i] != ValueVTs[i])
4643 if (!ConstantMemory) {
4656void SelectionDAGBuilder::visitStoreToSwiftError(
const StoreInst &
I) {
4658 "call visitStoreToSwiftError when backend supports swifterror");
4662 const Value *SrcV =
I.getOperand(0);
4664 SrcV->
getType(), ValueVTs, &Offsets, 0);
4665 assert(ValueVTs.
size() == 1 && Offsets[0] == 0 &&
4666 "expect a single EVT for swifterror");
4675 SDValue(Src.getNode(), Src.getResNo()));
4679void SelectionDAGBuilder::visitLoadFromSwiftError(
const LoadInst &
I) {
4681 "call visitLoadFromSwiftError when backend supports swifterror");
4684 !
I.hasMetadata(LLVMContext::MD_nontemporal) &&
4685 !
I.hasMetadata(LLVMContext::MD_invariant_load) &&
4686 "Support volatile, non temporal, invariant for load_from_swift_error");
4688 const Value *SV =
I.getOperand(0);
4689 Type *Ty =
I.getType();
4694 I.getAAMetadata()))) &&
4695 "load_from_swift_error should not be constant memory");
4700 ValueVTs, &Offsets, 0);
4701 assert(ValueVTs.
size() == 1 && Offsets[0] == 0 &&
4702 "expect a single EVT for swifterror");
4712void SelectionDAGBuilder::visitStore(
const StoreInst &
I) {
4714 return visitAtomicStore(
I);
4716 const Value *SrcV =
I.getOperand(0);
4717 const Value *PtrV =
I.getOperand(1);
4723 if (
const Argument *Arg = dyn_cast<Argument>(PtrV)) {
4724 if (Arg->hasSwiftErrorAttr())
4725 return visitStoreToSwiftError(
I);
4728 if (
const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
4729 if (Alloca->isSwiftError())
4730 return visitStoreToSwiftError(
I);
4737 SrcV->
getType(), ValueVTs, &MemVTs, &Offsets);
4738 unsigned NumValues = ValueVTs.
size();
4751 Align Alignment =
I.getAlign();
4756 unsigned ChainI = 0;
4757 for (
unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4774 if (MemVTs[i] != ValueVTs[i])
4777 DAG.
getStore(Root, dl, Val,
Add, PtrInfo, Alignment, MMOFlags, AAInfo);
4778 Chains[ChainI] = St;
4787void SelectionDAGBuilder::visitMaskedStore(
const CallInst &
I,
4788 bool IsCompressing) {
4794 Src0 =
I.getArgOperand(0);
4795 Ptr =
I.getArgOperand(1);
4796 Alignment = cast<ConstantInt>(
I.getArgOperand(2))->getAlignValue();
4797 Mask =
I.getArgOperand(3);
4802 Src0 =
I.getArgOperand(0);
4803 Ptr =
I.getArgOperand(1);
4804 Mask =
I.getArgOperand(2);
4805 Alignment =
I.getParamAlign(1).valueOrOne();
4808 Value *PtrOperand, *MaskOperand, *Src0Operand;
4811 getCompressingStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4813 getMaskedStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4823 if (
I.hasMetadata(LLVMContext::MD_nontemporal))
4868 assert(
Ptr->getType()->isVectorTy() &&
"Unexpected pointer type");
4871 if (
auto *
C = dyn_cast<Constant>(
Ptr)) {
4872 C =
C->getSplatValue();
4878 ElementCount NumElts = cast<VectorType>(
Ptr->getType())->getElementCount();
4887 if (!
GEP ||
GEP->getParent() != CurBB)
4890 if (
GEP->getNumOperands() != 2)
4893 const Value *BasePtr =
GEP->getPointerOperand();
4894 const Value *IndexVal =
GEP->getOperand(
GEP->getNumOperands() - 1);
4900 TypeSize ScaleVal =
DL.getTypeAllocSize(
GEP->getResultElementType());
4905 if (ScaleVal != 1 &&
4918void SelectionDAGBuilder::visitMaskedScatter(
const CallInst &
I) {
4926 Align Alignment = cast<ConstantInt>(
I.getArgOperand(2))
4927 ->getMaybeAlignValue()
4938 unsigned AS =
Ptr->getType()->getScalarType()->getPointerAddressSpace();
4958 Ops, MMO, IndexType,
false);
4963void SelectionDAGBuilder::visitMaskedLoad(
const CallInst &
I,
bool IsExpanding) {
4969 Ptr =
I.getArgOperand(0);
4970 Alignment = cast<ConstantInt>(
I.getArgOperand(1))->getAlignValue();
4971 Mask =
I.getArgOperand(2);
4972 Src0 =
I.getArgOperand(3);
4977 Ptr =
I.getArgOperand(0);
4978 Alignment =
I.getParamAlign(0).valueOrOne();
4979 Mask =
I.getArgOperand(1);
4980 Src0 =
I.getArgOperand(2);
4983 Value *PtrOperand, *MaskOperand, *Src0Operand;
4986 getExpandingLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4988 getMaskedLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
5006 if (
I.hasMetadata(LLVMContext::MD_nontemporal))
5032void SelectionDAGBuilder::visitMaskedGather(
const CallInst &
I) {
5042 Align Alignment = cast<ConstantInt>(
I.getArgOperand(1))
5043 ->getMaybeAlignValue()
5055 unsigned AS =
Ptr->getType()->getScalarType()->getPointerAddressSpace();
5101 AAMDNodes(),
nullptr, SSID, SuccessOrdering, FailureOrdering);
5104 dl, MemVT, VTs, InChain,
5115void SelectionDAGBuilder::visitAtomicRMW(
const AtomicRMWInst &
I) {
5118 switch (
I.getOperation()) {
5174void SelectionDAGBuilder::visitFence(
const FenceInst &
I) {
5188void SelectionDAGBuilder::visitAtomicLoad(
const LoadInst &
I) {
5208 nullptr, SSID, Order);
5224void SelectionDAGBuilder::visitAtomicStore(
const StoreInst &
I) {
5246 nullptr, SSID, Ordering);
5262void SelectionDAGBuilder::visitTargetIntrinsic(
const CallInst &
I,
5263 unsigned Intrinsic) {
5268 bool HasChain = !
F->doesNotAccessMemory();
5270 HasChain &&
F->onlyReadsMemory() &&
F->willReturn() &&
F->doesNotThrow();
5297 for (
unsigned i = 0, e =
I.arg_size(); i != e; ++i) {
5298 const Value *Arg =
I.getArgOperand(i);
5299 if (!
I.paramHasAttr(i, Attribute::ImmArg)) {
5306 if (
const ConstantInt *CI = dyn_cast<ConstantInt>(Arg)) {
5307 assert(CI->getBitWidth() <= 64 &&
5308 "large intrinsic immediates not handled");
5326 if (
auto *FPMO = dyn_cast<FPMathOperator>(&
I))
5327 Flags.copyFMF(*FPMO);
5334 auto *Token = Bundle->Inputs[0].get();
5336 assert(Ops.
back().getValueType() != MVT::Glue &&
5337 "Did not expected another glue node here.");
5345 if (IsTgtIntrinsic) {
5353 else if (
Info.fallbackAddressSpace)
5357 Info.size,
I.getAAMetadata());
5358 }
else if (!HasChain) {
5360 }
else if (!
I.getType()->isVoidTy()) {
5374 if (!
I.getType()->isVoidTy()) {
5375 if (!isa<VectorType>(
I.getType()))
5447 SDValue TwoToFractionalPartOfX;
5524 if (
Op.getValueType() == MVT::f32 &&
5548 if (
Op.getValueType() == MVT::f32 &&
5647 if (
Op.getValueType() == MVT::f32 &&
5731 return DAG.
getNode(
ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
5744 if (
Op.getValueType() == MVT::f32 &&
5821 return DAG.
getNode(
ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
5832 if (
Op.getValueType() == MVT::f32 &&
5845 bool IsExp10 =
false;
5846 if (
LHS.getValueType() == MVT::f32 &&
RHS.getValueType() == MVT::f32 &&
5850 IsExp10 = LHSC->isExactlyValue(Ten);
5877 unsigned Val = RHSC->getSExtValue();
5906 CurSquare, CurSquare);
5911 if (RHSC->getSExtValue() < 0)
5925 EVT VT =
LHS.getValueType();
5948 if ((ScaleInt > 0 || (Saturating &&
Signed)) &&
5952 Opcode, VT, ScaleInt);
5987 switch (
N.getOpcode()) {
5990 Regs.emplace_back(cast<RegisterSDNode>(
Op)->
getReg(),
5991 Op.getValueType().getSizeInBits());
6016bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
6019 const Argument *Arg = dyn_cast<Argument>(V);
6033 auto &Inst =
TII->get(TargetOpcode::DBG_INSTR_REF);
6040 auto *NewDIExpr = FragExpr;
6047 return BuildMI(MF,
DL, Inst,
false, MOs, Variable, NewDIExpr);
6050 auto &Inst =
TII->get(TargetOpcode::DBG_VALUE);
6051 return BuildMI(MF,
DL, Inst, Indirect, Reg, Variable, FragExpr);
6055 if (Kind == FuncArgumentDbgValueKind::Value) {
6060 if (!IsInEntryBlock)
6076 bool VariableIsFunctionInputArg = Variable->
isParameter() &&
6077 !
DL->getInlinedAt();
6079 if (!IsInPrologue && !VariableIsFunctionInputArg)
6113 if (VariableIsFunctionInputArg) {
6118 return !NodeMap[
V].getNode();
6123 bool IsIndirect =
false;
6124 std::optional<MachineOperand>
Op;
6127 if (FI != std::numeric_limits<int>::max())
6131 if (!
Op &&
N.getNode()) {
6134 if (ArgRegsAndSizes.
size() == 1)
6135 Reg = ArgRegsAndSizes.
front().first;
6137 if (Reg &&
Reg.isVirtual()) {
6145 IsIndirect =
Kind != FuncArgumentDbgValueKind::Value;
6149 if (!
Op &&
N.getNode()) {
6154 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
6163 for (
const auto &RegAndSize : SplitRegs) {
6167 int RegFragmentSizeInBits = RegAndSize.second;
6169 uint64_t ExprFragmentSizeInBits = ExprFragmentInfo->SizeInBits;
6172 if (
Offset >= ExprFragmentSizeInBits)
6176 if (
Offset + RegFragmentSizeInBits > ExprFragmentSizeInBits) {
6177 RegFragmentSizeInBits = ExprFragmentSizeInBits -
Offset;
6182 Expr,
Offset, RegFragmentSizeInBits);
6183 Offset += RegAndSize.second;
6186 if (!FragmentExpr) {
6193 MakeVRegDbgValue(RegAndSize.first, *FragmentExpr,
6194 Kind != FuncArgumentDbgValueKind::Value);
6205 V->getType(), std::nullopt);
6206 if (RFV.occupiesMultipleRegs()) {
6207 splitMultiRegDbgValue(RFV.getRegsAndSizes());
6212 IsIndirect =
Kind != FuncArgumentDbgValueKind::Value;
6213 }
else if (ArgRegsAndSizes.
size() > 1) {
6216 splitMultiRegDbgValue(ArgRegsAndSizes);
6225 "Expected inlined-at fields to agree");
6229 NewMI = MakeVRegDbgValue(
Op->getReg(), Expr, IsIndirect);
6231 NewMI =
BuildMI(MF,
DL,
TII->get(TargetOpcode::DBG_VALUE),
true, *
Op,
6244 unsigned DbgSDNodeOrder) {
6245 if (
auto *FISDN = dyn_cast<FrameIndexSDNode>(
N.getNode())) {
6257 false, dl, DbgSDNodeOrder);
6260 false, dl, DbgSDNodeOrder);
6264 switch (Intrinsic) {
6265 case Intrinsic::smul_fix:
6267 case Intrinsic::umul_fix:
6269 case Intrinsic::smul_fix_sat:
6271 case Intrinsic::umul_fix_sat:
6273 case Intrinsic::sdiv_fix:
6275 case Intrinsic::udiv_fix:
6277 case Intrinsic::sdiv_fix_sat:
6279 case Intrinsic::udiv_fix_sat:
6286void SelectionDAGBuilder::lowerCallToExternalSymbol(
const CallInst &
I,
6287 const char *FunctionName) {
6288 assert(FunctionName &&
"FunctionName must not be nullptr");
6298 assert(cast<CallBase>(PreallocatedSetup)
6301 "expected call_preallocated_setup Value");
6302 for (
const auto *U : PreallocatedSetup->
users()) {
6303 auto *UseCall = cast<CallBase>(U);
6304 const Function *Fn = UseCall->getCalledFunction();
6305 if (!Fn || Fn->
getIntrinsicID() != Intrinsic::call_preallocated_arg) {
6315bool SelectionDAGBuilder::visitEntryValueDbgValue(
6322 const Argument *Arg = cast<Argument>(Values[0]);
6328 dbgs() <<
"Dropping dbg.value: expression is entry_value but "
6329 "couldn't find an associated register for the Argument\n");
6332 Register ArgVReg = ArgIt->getSecond();
6335 if (ArgVReg == VirtReg || ArgVReg == PhysReg) {
6337 Variable, Expr, PhysReg,
false , DbgLoc, SDNodeOrder);
6341 LLVM_DEBUG(
dbgs() <<
"Dropping dbg.value: expression is entry_value but "
6342 "couldn't find a physical register\n");
6347void SelectionDAGBuilder::visitConvergenceControl(
const CallInst &
I,
6348 unsigned Intrinsic) {
6350 switch (Intrinsic) {
6351 case Intrinsic::experimental_convergence_anchor:
6354 case Intrinsic::experimental_convergence_entry:
6357 case Intrinsic::experimental_convergence_loop: {
6359 auto *Token = Bundle->Inputs[0].get();
6367void SelectionDAGBuilder::visitVectorHistogram(
const CallInst &
I,
6368 unsigned IntrinsicID) {
6371 assert(IntrinsicID == Intrinsic::experimental_vector_histogram_add &&
6372 "Tried to lower unsupported histogram type");
6393 unsigned AS =
Ptr->getType()->getScalarType()->getPointerAddressSpace();
6419 Ops, MMO, IndexType);
6425void SelectionDAGBuilder::visitVectorExtractLastActive(
const CallInst &
I,
6426 unsigned Intrinsic) {
6427 assert(Intrinsic == Intrinsic::experimental_vector_extract_last_active &&
6428 "Tried lowering invalid vector extract last");
6444 EVT BoolVT =
Mask.getValueType().getScalarType();
6453void SelectionDAGBuilder::visitIntrinsicCall(
const CallInst &
I,
6454 unsigned Intrinsic) {
6461 if (
auto *FPOp = dyn_cast<FPMathOperator>(&
I))
6462 Flags.copyFMF(*FPOp);
6464 switch (Intrinsic) {
6467 visitTargetIntrinsic(
I, Intrinsic);
6469 case Intrinsic::vscale: {
6474 case Intrinsic::vastart: visitVAStart(
I);
return;
6475 case Intrinsic::vaend: visitVAEnd(
I);
return;
6476 case Intrinsic::vacopy: visitVACopy(
I);
return;
6477 case Intrinsic::returnaddress:
6482 case Intrinsic::addressofreturnaddress:
6487 case Intrinsic::sponentry:
6492 case Intrinsic::frameaddress:
6497 case Intrinsic::read_volatile_register:
6498 case Intrinsic::read_register: {
6502 DAG.
getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
6510 case Intrinsic::write_register: {
6512 Value *RegValue =
I.getArgOperand(1);
6515 DAG.
getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
6520 case Intrinsic::memcpy: {
6521 const auto &MCI = cast<MemCpyInst>(
I);
6526 Align DstAlign = MCI.getDestAlign().valueOrOne();
6527 Align SrcAlign = MCI.getSourceAlign().valueOrOne();
6528 Align Alignment = std::min(DstAlign, SrcAlign);
6529 bool isVol = MCI.isVolatile();
6534 false, &
I, std::nullopt,
6537 I.getAAMetadata(),
AA);
6538 updateDAGForMaybeTailCall(MC);
6541 case Intrinsic::memcpy_inline: {
6542 const auto &MCI = cast<MemCpyInlineInst>(
I);
6546 assert(isa<ConstantSDNode>(
Size) &&
"memcpy_inline needs constant size");
6548 Align DstAlign = MCI.getDestAlign().valueOrOne();
6549 Align SrcAlign = MCI.getSourceAlign().valueOrOne();
6550 Align Alignment = std::min(DstAlign, SrcAlign);
6551 bool isVol = MCI.isVolatile();
6555 true, &
I, std::nullopt,
6558 I.getAAMetadata(),
AA);
6559 updateDAGForMaybeTailCall(MC);
6562 case Intrinsic::memset: {
6563 const auto &MSI = cast<MemSetInst>(
I);
6568 Align Alignment = MSI.getDestAlign().valueOrOne();
6569 bool isVol = MSI.isVolatile();
6572 Root, sdl, Op1, Op2, Op3, Alignment, isVol,
false,
6574 updateDAGForMaybeTailCall(MS);
6577 case Intrinsic::memset_inline: {
6578 const auto &MSII = cast<MemSetInlineInst>(
I);
6582 assert(isa<ConstantSDNode>(
Size) &&
"memset_inline needs constant size");
6584 Align DstAlign = MSII.getDestAlign().valueOrOne();
6585 bool isVol = MSII.isVolatile();
6591 updateDAGForMaybeTailCall(MC);
6594 case Intrinsic::memmove: {
6595 const auto &MMI = cast<MemMoveInst>(
I);
6600 Align DstAlign = MMI.getDestAlign().valueOrOne();
6601 Align SrcAlign = MMI.getSourceAlign().valueOrOne();
6602 Align Alignment = std::min(DstAlign, SrcAlign);
6603 bool isVol = MMI.isVolatile();
6611 I.getAAMetadata(),
AA);
6612 updateDAGForMaybeTailCall(MM);
6615 case Intrinsic::memcpy_element_unordered_atomic: {
6621 Type *LengthTy =
MI.getLength()->getType();
6622 unsigned ElemSz =
MI.getElementSizeInBytes();
6628 updateDAGForMaybeTailCall(MC);
6631 case Intrinsic::memmove_element_unordered_atomic: {
6632 auto &
MI = cast<AtomicMemMoveInst>(
I);
6637 Type *LengthTy =
MI.getLength()->getType();
6638 unsigned ElemSz =
MI.getElementSizeInBytes();
6644 updateDAGForMaybeTailCall(MC);
6647 case Intrinsic::memset_element_unordered_atomic: {
6648 auto &
MI = cast<AtomicMemSetInst>(
I);
6653 Type *LengthTy =
MI.getLength()->getType();
6654 unsigned ElemSz =
MI.getElementSizeInBytes();
6659 updateDAGForMaybeTailCall(MC);
6662 case Intrinsic::call_preallocated_setup: {
6671 case Intrinsic::call_preallocated_arg: {
6686 case Intrinsic::dbg_declare: {
6687 const auto &DI = cast<DbgDeclareInst>(
I);
6690 if (AssignmentTrackingEnabled ||
6693 LLVM_DEBUG(
dbgs() <<
"SelectionDAG visiting dbg_declare: " << DI <<
"\n");
6699 assert(!DI.hasArgList() &&
"Only dbg.value should currently use DIArgList");
6704 case Intrinsic::dbg_label: {
6707 assert(Label &&
"Missing label");
6714 case Intrinsic::dbg_assign: {
6716 if (AssignmentTrackingEnabled)
6722 case Intrinsic::dbg_value: {
6724 if (AssignmentTrackingEnabled)
6744 SDNodeOrder, IsVariadic))
6750 case Intrinsic::eh_typeid_for: {
6759 case Intrinsic::eh_return_i32:
6760 case Intrinsic::eh_return_i64:
6768 case Intrinsic::eh_unwind_init:
6771 case Intrinsic::eh_dwarf_cfa:
6776 case Intrinsic::eh_sjlj_callsite: {
6777 ConstantInt *CI = cast<ConstantInt>(
I.getArgOperand(0));
6783 case Intrinsic::eh_sjlj_functioncontext: {
6787 cast<AllocaInst>(
I.getArgOperand(0)->stripPointerCasts());
6792 case Intrinsic::eh_sjlj_setjmp: {
6802 case Intrinsic::eh_sjlj_longjmp:
6806 case Intrinsic::eh_sjlj_setup_dispatch:
6810 case Intrinsic::masked_gather:
6811 visitMaskedGather(
I);
6813 case Intrinsic::masked_load:
6816 case Intrinsic::masked_scatter:
6817 visitMaskedScatter(
I);
6819 case Intrinsic::masked_store:
6820 visitMaskedStore(
I);
6822 case Intrinsic::masked_expandload:
6823 visitMaskedLoad(
I,
true );
6825 case Intrinsic::masked_compressstore:
6826 visitMaskedStore(
I,
true );
6828 case Intrinsic::powi:
6832 case Intrinsic::log:
6835 case Intrinsic::log2:
6839 case Intrinsic::log10:
6843 case Intrinsic::exp:
6846 case Intrinsic::exp2:
6850 case Intrinsic::pow:
6854 case Intrinsic::sqrt:
6855 case Intrinsic::fabs:
6856 case Intrinsic::sin:
6857 case Intrinsic::cos:
6858 case Intrinsic::tan:
6859 case Intrinsic::asin:
6860 case Intrinsic::acos:
6861 case Intrinsic::atan:
6862 case Intrinsic::sinh:
6863 case Intrinsic::cosh:
6864 case Intrinsic::tanh:
6865 case Intrinsic::exp10:
6866 case Intrinsic::floor:
6867 case Intrinsic::ceil:
6868 case Intrinsic::trunc:
6869 case Intrinsic::rint:
6870 case Intrinsic::nearbyint:
6871 case Intrinsic::round:
6872 case Intrinsic::roundeven:
6873 case Intrinsic::canonicalize: {
6876 switch (Intrinsic) {
6878 case Intrinsic::sqrt: Opcode =
ISD::FSQRT;
break;
6879 case Intrinsic::fabs: Opcode =
ISD::FABS;
break;
6880 case Intrinsic::sin: Opcode =
ISD::FSIN;
break;
6881 case Intrinsic::cos: Opcode =
ISD::FCOS;
break;
6882 case Intrinsic::tan: Opcode =
ISD::FTAN;
break;
6883 case Intrinsic::asin: Opcode =
ISD::FASIN;
break;
6884 case Intrinsic::acos: Opcode =
ISD::FACOS;
break;
6885 case Intrinsic::atan: Opcode =
ISD::FATAN;
break;
6886 case Intrinsic::sinh: Opcode =
ISD::FSINH;
break;
6887 case Intrinsic::cosh: Opcode =
ISD::FCOSH;
break;
6888 case Intrinsic::tanh: Opcode =
ISD::FTANH;
break;
6889 case Intrinsic::exp10: Opcode =
ISD::FEXP10;
break;
6890 case Intrinsic::floor: Opcode =
ISD::FFLOOR;
break;
6891 case Intrinsic::ceil: Opcode =
ISD::FCEIL;
break;
6892 case Intrinsic::trunc: Opcode =
ISD::FTRUNC;
break;
6893 case Intrinsic::rint: Opcode =
ISD::FRINT;
break;
6895 case Intrinsic::round: Opcode =
ISD::FROUND;
break;
6906 case Intrinsic::atan2:
6912 case Intrinsic::lround:
6913 case Intrinsic::llround:
6914 case Intrinsic::lrint:
6915 case Intrinsic::llrint: {
6918 switch (Intrinsic) {
6920 case Intrinsic::lround: Opcode =
ISD::LROUND;
break;
6922 case Intrinsic::lrint: Opcode =
ISD::LRINT;
break;
6923 case Intrinsic::llrint: Opcode =
ISD::LLRINT;
break;
6932 case Intrinsic::minnum:
6938 case Intrinsic::maxnum:
6944 case Intrinsic::minimum:
6950 case Intrinsic::maximum:
6956 case Intrinsic::minimumnum:
6962 case Intrinsic::maximumnum:
6968 case Intrinsic::copysign:
6974 case Intrinsic::ldexp:
6980 case Intrinsic::sincos:
6981 case Intrinsic::frexp: {
6983 switch (Intrinsic) {
6986 case Intrinsic::sincos:
6989 case Intrinsic::frexp:
7000 case Intrinsic::arithmetic_fence: {
7006 case Intrinsic::fma:
7012#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
7013 case Intrinsic::INTRINSIC:
7014#include "llvm/IR/ConstrainedOps.def"
7015 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(
I));
7017#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
7018#include "llvm/IR/VPIntrinsics.def"
7019 visitVectorPredicationIntrinsic(cast<VPIntrinsic>(
I));
7021 case Intrinsic::fptrunc_round: {
7024 Metadata *MD = cast<MetadataAsValue>(
I.getArgOperand(1))->getMetadata();
7025 std::optional<RoundingMode> RoundMode =
7032 Flags.copyFMF(*cast<FPMathOperator>(&
I));
7043 case Intrinsic::fmuladd: {
7064 case Intrinsic::convert_to_fp16:
7071 case Intrinsic::convert_from_fp16:
7077 case Intrinsic::fptosi_sat: {
7084 case Intrinsic::fptoui_sat: {
7091 case Intrinsic::set_rounding:
7097 case Intrinsic::is_fpclass: {
7102 cast<ConstantInt>(
I.getArgOperand(1))->getZExtValue());
7107 Flags.setNoFPExcept(
7108 !
F.getAttributes().hasFnAttr(llvm::Attribute::StrictFP));
7124 case Intrinsic::get_fpenv: {
7139 int SPFI = cast<FrameIndexSDNode>(Temp.
getNode())->getIndex();
7146 Res =
DAG.
getLoad(EnvVT, sdl, Chain, Temp, MPI);
7152 case Intrinsic::set_fpenv: {
7166 int SPFI = cast<FrameIndexSDNode>(Temp.
getNode())->getIndex();
7169 Chain =
DAG.
getStore(Chain, sdl, Env, Temp, MPI, TempAlign,
7179 case Intrinsic::reset_fpenv:
7182 case Intrinsic::get_fpmode:
7191 case Intrinsic::set_fpmode:
7196 case Intrinsic::reset_fpmode: {
7201 case Intrinsic::pcmarker: {
7206 case Intrinsic::readcyclecounter: {
7214 case Intrinsic::readsteadycounter: {
7222 case Intrinsic::bitreverse:
7227 case Intrinsic::bswap:
7232 case Intrinsic::cttz: {
7234 ConstantInt *CI = cast<ConstantInt>(
I.getArgOperand(1));
7240 case Intrinsic::ctlz: {
7242 ConstantInt *CI = cast<ConstantInt>(
I.getArgOperand(1));
7248 case Intrinsic::ctpop: {
7254 case Intrinsic::fshl:
7255 case Intrinsic::fshr: {
7256 bool IsFSHL =
Intrinsic == Intrinsic::fshl;
7260 EVT VT =
X.getValueType();
7271 case Intrinsic::sadd_sat: {
7277 case Intrinsic::uadd_sat: {
7283 case Intrinsic::ssub_sat: {
7289 case Intrinsic::usub_sat: {
7295 case Intrinsic::sshl_sat: {
7301 case Intrinsic::ushl_sat: {
7307 case Intrinsic::smul_fix:
7308 case Intrinsic::umul_fix:
7309 case Intrinsic::smul_fix_sat:
7310 case Intrinsic::umul_fix_sat: {
7318 case Intrinsic::sdiv_fix:
7319 case Intrinsic::udiv_fix:
7320 case Intrinsic::sdiv_fix_sat:
7321 case Intrinsic::udiv_fix_sat: {
7326 Op1, Op2, Op3,
DAG, TLI));
7329 case Intrinsic::smax: {
7335 case Intrinsic::smin: {
7341 case Intrinsic::umax: {
7347 case Intrinsic::umin: {
7353 case Intrinsic::abs: {
7359 case Intrinsic::scmp: {
7366 case Intrinsic::ucmp: {
7373 case Intrinsic::stacksave: {
7381 case Intrinsic::stackrestore:
7385 case Intrinsic::get_dynamic_area_offset: {
7400 case Intrinsic::stackguard: {
7421 case Intrinsic::stackprotector: {
7443 Chain, sdl, Src, FIN,
7450 case Intrinsic::objectsize:
7453 case Intrinsic::is_constant:
7456 case Intrinsic::annotation:
7457 case Intrinsic::ptr_annotation:
7458 case Intrinsic::launder_invariant_group:
7459 case Intrinsic::strip_invariant_group:
7464 case Intrinsic::assume:
7465 case Intrinsic::experimental_noalias_scope_decl:
7466 case Intrinsic::var_annotation:
7467 case Intrinsic::sideeffect:
7472 case Intrinsic::codeview_annotation: {
7476 Metadata *MD = cast<MetadataAsValue>(
I.getArgOperand(0))->getMetadata();
7483 case Intrinsic::init_trampoline: {
7484 const Function *
F = cast<Function>(
I.getArgOperand(1)->stripPointerCasts());
7499 case Intrinsic::adjust_trampoline:
7504 case Intrinsic::gcroot: {
7506 "only valid in functions with gc specified, enforced by Verifier");
7508 const Value *Alloca =
I.getArgOperand(0)->stripPointerCasts();
7509 const Constant *TypeMap = cast<Constant>(
I.getArgOperand(1));
7515 case Intrinsic::gcread:
7516 case Intrinsic::gcwrite:
7518 case Intrinsic::get_rounding:
7524 case Intrinsic::expect:
7525 case Intrinsic::expect_with_probability:
7531 case Intrinsic::ubsantrap:
7532 case Intrinsic::debugtrap:
7533 case Intrinsic::trap: {
7535 I.getAttributes().getFnAttr(
"trap-func-name").getValueAsString();
7536 if (TrapFuncName.
empty()) {
7537 switch (Intrinsic) {
7538 case Intrinsic::trap:
7541 case Intrinsic::debugtrap:
7544 case Intrinsic::ubsantrap:
7548 cast<ConstantInt>(
I.getArgOperand(0))->getZExtValue(), sdl,
7554 I.hasFnAttr(Attribute::NoMerge));
7558 if (Intrinsic == Intrinsic::ubsantrap) {
7560 Args[0].Val =
I.getArgOperand(0);
7562 Args[0].Ty =
Args[0].Val->getType();
7566 CLI.setDebugLoc(sdl).setChain(
getRoot()).setLibCallee(
7571 CLI.NoMerge =
I.hasFnAttr(Attribute::NoMerge);
7577 case Intrinsic::allow_runtime_check:
7578 case Intrinsic::allow_ubsan_check:
7582 case Intrinsic::uadd_with_overflow:
7583 case Intrinsic::sadd_with_overflow:
7584 case Intrinsic::usub_with_overflow:
7585 case Intrinsic::ssub_with_overflow:
7586 case Intrinsic::umul_with_overflow:
7587 case Intrinsic::smul_with_overflow: {
7589 switch (Intrinsic) {
7591 case Intrinsic::uadd_with_overflow:
Op =
ISD::UADDO;
break;
7592 case Intrinsic::sadd_with_overflow:
Op =
ISD::SADDO;
break;
7593 case Intrinsic::usub_with_overflow:
Op =
ISD::USUBO;
break;
7594 case Intrinsic::ssub_with_overflow:
Op =
ISD::SSUBO;
break;
7595 case Intrinsic::umul_with_overflow:
Op =
ISD::UMULO;
break;
7596 case Intrinsic::smul_with_overflow:
Op =
ISD::SMULO;
break;
7602 EVT OverflowVT = MVT::i1;
7611 case Intrinsic::prefetch: {
7613 unsigned rw = cast<ConstantInt>(
I.getArgOperand(1))->getZExtValue();
7626 std::nullopt, Flags);
7635 case Intrinsic::lifetime_start:
7636 case Intrinsic::lifetime_end: {
7637 bool IsStart = (
Intrinsic == Intrinsic::lifetime_start);
7642 const int64_t ObjectSize =
7643 cast<ConstantInt>(
I.getArgOperand(0))->getSExtValue();
7648 for (
const Value *Alloca : Allocas) {
7649 const AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(Alloca);
7652 if (!LifetimeObject)
7672 case Intrinsic::pseudoprobe: {
7673 auto Guid = cast<ConstantInt>(
I.getArgOperand(0))->getZExtValue();
7674 auto Index = cast<ConstantInt>(
I.getArgOperand(1))->getZExtValue();
7675 auto Attr = cast<ConstantInt>(
I.getArgOperand(2))->getZExtValue();
7680 case Intrinsic::invariant_start:
7685 case Intrinsic::invariant_end:
7688 case Intrinsic::clear_cache: {
7693 {InputChain, StartVal, EndVal});
7698 case Intrinsic::donothing:
7699 case Intrinsic::seh_try_begin:
7700 case Intrinsic::seh_scope_begin:
7701 case Intrinsic::seh_try_end:
7702 case Intrinsic::seh_scope_end:
7705 case Intrinsic::experimental_stackmap:
7708 case Intrinsic::experimental_patchpoint_void:
7709 case Intrinsic::experimental_patchpoint:
7712 case Intrinsic::experimental_gc_statepoint:
7715 case Intrinsic::experimental_gc_result:
7716 visitGCResult(cast<GCResultInst>(
I));
7718 case Intrinsic::experimental_gc_relocate:
7719 visitGCRelocate(cast<GCRelocateInst>(
I));
7721 case Intrinsic::instrprof_cover:
7723 case Intrinsic::instrprof_increment:
7725 case Intrinsic::instrprof_timestamp:
7727 case Intrinsic::instrprof_value_profile:
7729 case Intrinsic::instrprof_mcdc_parameters:
7731 case Intrinsic::instrprof_mcdc_tvbitmap_update:
7733 case Intrinsic::localescape: {
7739 for (
unsigned Idx = 0, E =
I.arg_size();
Idx < E; ++
Idx) {
7740 Value *Arg =
I.getArgOperand(
Idx)->stripPointerCasts();
7741 if (isa<ConstantPointerNull>(Arg))
7745 "can only escape static allocas");
7750 TII->get(TargetOpcode::LOCAL_ESCAPE))
7758 case Intrinsic::localrecover: {
7763 auto *Fn = cast<Function>(
I.getArgOperand(0)->stripPointerCasts());
7764 auto *
Idx = cast<ConstantInt>(
I.getArgOperand(2));
7766 unsigned(
Idx->getLimitedValue(std::numeric_limits<int>::max()));
7787 case Intrinsic::fake_use: {
7788 Value *
V =
I.getArgOperand(0);
7793 auto FakeUseValue = [&]() ->
SDValue {
7803 if (isa<Constant>(V))
7807 if (!FakeUseValue || FakeUseValue.isUndef())
7810 Ops[1] = FakeUseValue;
7813 if (!Ops[1] || Ops[1].
isUndef())
7819 case Intrinsic::eh_exceptionpointer:
7820 case Intrinsic::eh_exceptioncode: {
7822 const auto *CPI = cast<CatchPadInst>(
I.getArgOperand(0));
7827 if (Intrinsic == Intrinsic::eh_exceptioncode)
7832 case Intrinsic::xray_customevent: {
7861 case Intrinsic::xray_typedevent: {
7888 TargetOpcode::PATCHABLE_TYPED_EVENT_CALL, sdl, NodeTys, Ops);
7894 case Intrinsic::experimental_deoptimize:
7897 case Intrinsic::stepvector:
7900 case Intrinsic::vector_reduce_fadd:
7901 case Intrinsic::vector_reduce_fmul:
7902 case Intrinsic::vector_reduce_add:
7903 case Intrinsic::vector_reduce_mul:
7904 case Intrinsic::vector_reduce_and:
7905 case Intrinsic::vector_reduce_or:
7906 case Intrinsic::vector_reduce_xor:
7907 case Intrinsic::vector_reduce_smax:
7908 case Intrinsic::vector_reduce_smin:
7909 case Intrinsic::vector_reduce_umax:
7910 case Intrinsic::vector_reduce_umin:
7911 case Intrinsic::vector_reduce_fmax:
7912 case Intrinsic::vector_reduce_fmin:
7913 case Intrinsic::vector_reduce_fmaximum:
7914 case Intrinsic::vector_reduce_fminimum:
7915 visitVectorReduce(
I, Intrinsic);
7918 case Intrinsic::icall_branch_funnel: {
7927 "llvm.icall.branch.funnel operand must be a GlobalValue");
7930 struct BranchFunnelTarget {
7936 for (
unsigned Op = 1,
N =
I.arg_size();
Op !=
N;
Op += 2) {
7939 if (ElemBase !=
Base)
7941 "to the same GlobalValue");
7944 auto *GA = dyn_cast<GlobalAddressSDNode>(Val);
7947 "llvm.icall.branch.funnel operand must be a GlobalValue");
7953 [](
const BranchFunnelTarget &T1,
const BranchFunnelTarget &T2) {
7954 return T1.Offset < T2.Offset;
7957 for (
auto &
T : Targets) {
7972 case Intrinsic::wasm_landingpad_index:
7978 case Intrinsic::aarch64_settag:
7979 case Intrinsic::aarch64_settag_zero: {
7981 bool ZeroMemory =
Intrinsic == Intrinsic::aarch64_settag_zero;
7990 case Intrinsic::amdgcn_cs_chain: {
7991 assert(
I.arg_size() == 5 &&
"Additional args not supported yet");
7992 assert(cast<ConstantInt>(
I.getOperand(4))->isZero() &&
7993 "Non-zero flags not supported yet");
8009 for (
unsigned Idx : {2, 3, 1}) {
8012 Arg.
Ty =
I.getOperand(
Idx)->getType();
8014 Args.push_back(Arg);
8017 assert(Args[0].IsInReg &&
"SGPR args should be marked inreg");
8018 assert(!Args[1].IsInReg &&
"VGPR args should not be marked inreg");
8019 Args[2].IsInReg =
true;
8024 .setCallee(
CC,
RetTy, Callee, std::move(Args))
8027 .setConvergent(
I.isConvergent());
8029 std::pair<SDValue, SDValue>
Result =
8033 "Should've lowered as tail call");
8038 case Intrinsic::ptrmask: {
8058 case Intrinsic::threadlocal_address: {
8062 case Intrinsic::get_active_lane_mask: {
8065 EVT ElementVT =
Index.getValueType();
8068 visitTargetIntrinsic(
I, Intrinsic);
8086 case Intrinsic::experimental_get_vector_length: {
8087 assert(cast<ConstantInt>(
I.getOperand(1))->getSExtValue() > 0 &&
8088 "Expected positive VF");
8089 unsigned VF = cast<ConstantInt>(
I.getOperand(1))->getZExtValue();
8090 bool IsScalable = cast<ConstantInt>(
I.getOperand(2))->isOne();
8096 visitTargetIntrinsic(
I, Intrinsic);
8105 if (CountVT.
bitsLT(VT)) {
8120 case Intrinsic::experimental_vector_partial_reduce_add: {
8123 visitTargetIntrinsic(
I, Intrinsic);
8132 case Intrinsic::experimental_cttz_elts: {
8135 EVT OpVT =
Op.getValueType();
8138 visitTargetIntrinsic(
I, Intrinsic);
8153 !cast<ConstantSDNode>(
getValue(
I.getOperand(1)))->isZero();
8155 if (isa<ScalableVectorType>(
I.getOperand(0)->getType()))
8183 case Intrinsic::vector_insert: {
8191 if (
Index.getValueType() != VectorIdxTy)
8199 case Intrinsic::vector_extract: {
8207 if (
Index.getValueType() != VectorIdxTy)
8214 case Intrinsic::experimental_vector_match: {
8220 EVT ResVT =
Mask.getValueType();
8226 visitTargetIntrinsic(
I, Intrinsic);
8232 for (
unsigned i = 0; i < SearchSize; ++i) {
8244 case Intrinsic::vector_reverse:
8245 visitVectorReverse(
I);
8247 case Intrinsic::vector_splice:
8248 visitVectorSplice(
I);
8250 case Intrinsic::callbr_landingpad:
8251 visitCallBrLandingPad(
I);
8253 case Intrinsic::vector_interleave2:
8254 visitVectorInterleave(
I);
8256 case Intrinsic::vector_deinterleave2:
8257 visitVectorDeinterleave(
I);
8259 case Intrinsic::experimental_vector_compress:
8266 case Intrinsic::experimental_convergence_anchor:
8267 case Intrinsic::experimental_convergence_entry:
8268 case Intrinsic::experimental_convergence_loop:
8269 visitConvergenceControl(
I, Intrinsic);
8271 case Intrinsic::experimental_vector_histogram_add: {
8272 visitVectorHistogram(
I, Intrinsic);
8275 case Intrinsic::experimental_vector_extract_last_active: {
8276 visitVectorExtractLastActive(
I, Intrinsic);
8282void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
8310 PendingConstrainedFP.push_back(OutChain);
8316 PendingConstrainedFPStrict.push_back(OutChain);
8328 Flags.setNoFPExcept(
true);
8330 if (
auto *FPOp = dyn_cast<FPMathOperator>(&FPI))
8331 Flags.copyFMF(*FPOp);
8336#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
8337 case Intrinsic::INTRINSIC: \
8338 Opcode = ISD::STRICT_##DAGN; \
8340#include "llvm/IR/ConstrainedOps.def"
8341 case Intrinsic::experimental_constrained_fmuladd: {
8348 pushOutChain(
Mul, EB);
8369 auto *
FPCmp = dyn_cast<ConstrainedFPCmpIntrinsic>(&FPI);
8379 pushOutChain(Result, EB);
8386 std::optional<unsigned> ResOPC;
8388 case Intrinsic::vp_ctlz: {
8389 bool IsZeroUndef = cast<ConstantInt>(VPIntrin.
getArgOperand(1))->isOne();
8390 ResOPC = IsZeroUndef ? ISD::VP_CTLZ_ZERO_UNDEF : ISD::VP_CTLZ;
8393 case Intrinsic::vp_cttz: {
8394 bool IsZeroUndef = cast<ConstantInt>(VPIntrin.
getArgOperand(1))->isOne();
8395 ResOPC = IsZeroUndef ? ISD::VP_CTTZ_ZERO_UNDEF : ISD::VP_CTTZ;
8398 case Intrinsic::vp_cttz_elts: {
8399 bool IsZeroPoison = cast<ConstantInt>(VPIntrin.
getArgOperand(1))->isOne();
8400 ResOPC = IsZeroPoison ? ISD::VP_CTTZ_ELTS_ZERO_UNDEF : ISD::VP_CTTZ_ELTS;
8403#define HELPER_MAP_VPID_TO_VPSD(VPID, VPSD) \
8404 case Intrinsic::VPID: \
8405 ResOPC = ISD::VPSD; \
8407#include "llvm/IR/VPIntrinsics.def"
8412 "Inconsistency: no SDNode available for this VPIntrinsic!");
8414 if (*ResOPC == ISD::VP_REDUCE_SEQ_FADD ||
8415 *ResOPC == ISD::VP_REDUCE_SEQ_FMUL) {
8417 return *ResOPC == ISD::VP_REDUCE_SEQ_FADD ? ISD::VP_REDUCE_FADD
8418 : ISD::VP_REDUCE_FMUL;
8424void SelectionDAGBuilder::visitVPLoad(
8450void SelectionDAGBuilder::visitVPGather(
8486 {DAG.getRoot(), Base, Index, Scale, OpValues[1], OpValues[2]}, MMO,
8492void SelectionDAGBuilder::visitVPStore(
8496 EVT VT = OpValues[0].getValueType();
8514void SelectionDAGBuilder::visitVPScatter(
8519 EVT VT = OpValues[0].getValueType();
8549 {getMemoryRoot(), OpValues[0], Base, Index, Scale,
8550 OpValues[2], OpValues[3]},
8556void SelectionDAGBuilder::visitVPStridedLoad(
8575 OpValues[2], OpValues[3], MMO,
8583void SelectionDAGBuilder::visitVPStridedStore(
8587 EVT VT = OpValues[0].getValueType();
8599 DAG.
getUNDEF(OpValues[1].getValueType()), OpValues[2], OpValues[3],
8607void SelectionDAGBuilder::visitVPCmp(
const VPCmpIntrinsic &VPIntrin) {
8632 "Unexpected target EVL type");
8641void SelectionDAGBuilder::visitVectorPredicationIntrinsic(
8648 if (
const auto *CmpI = dyn_cast<VPCmpIntrinsic>(&VPIntrin))
8649 return visitVPCmp(*CmpI);
8660 "Unexpected target EVL type");
8664 for (
unsigned I = 0;
I < VPIntrin.
arg_size(); ++
I) {
8666 if (
I == EVLParamPos)
8674 if (
auto *FPMO = dyn_cast<FPMathOperator>(&VPIntrin))
8681 visitVPLoad(VPIntrin, ValueVTs[0], OpValues);
8683 case ISD::VP_GATHER:
8684 visitVPGather(VPIntrin, ValueVTs[0], OpValues);
8686 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
8687 visitVPStridedLoad(VPIntrin, ValueVTs[0], OpValues);
8690 visitVPStore(VPIntrin, OpValues);
8692 case ISD::VP_SCATTER:
8693 visitVPScatter(VPIntrin, OpValues);
8695 case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
8696 visitVPStridedStore(VPIntrin, OpValues);
8698 case ISD::VP_FMULADD: {
8699 assert(OpValues.
size() == 5 &&
"Unexpected number of operands");
8701 if (
auto *FPMO = dyn_cast<FPMathOperator>(&VPIntrin))
8708 ISD::VP_FMUL,
DL, VTs,
8709 {OpValues[0], OpValues[1], OpValues[3], OpValues[4]}, SDFlags);
8712 {
Mul, OpValues[2], OpValues[3], OpValues[4]}, SDFlags);
8717 case ISD::VP_IS_FPCLASS: {
8720 auto Constant = OpValues[1]->getAsZExtVal();
8723 {OpValues[0],
Check, OpValues[2], OpValues[3]});
8727 case ISD::VP_INTTOPTR: {
8738 case ISD::VP_PTRTOINT: {
8753 case ISD::VP_CTLZ_ZERO_UNDEF:
8755 case ISD::VP_CTTZ_ZERO_UNDEF:
8756 case ISD::VP_CTTZ_ELTS_ZERO_UNDEF:
8757 case ISD::VP_CTTZ_ELTS: {
8759 DAG.
getNode(Opcode,
DL, VTs, {OpValues[0], OpValues[2], OpValues[3]});
8778 if (CallSiteIndex) {
8792 assert(BeginLabel &&
"BeginLabel should've been set");
8806 assert(
II &&
"II should've been set");
8817std::pair<SDValue, SDValue>
8831 std::pair<SDValue, SDValue> Result = TLI.
LowerCallTo(CLI);
8834 "Non-null chain expected with non-tail call!");
8835 assert((Result.second.getNode() || !Result.first.getNode()) &&
8836 "Null value expected with tail call!");
8838 if (!Result.second.getNode()) {
8845 PendingExports.clear();
8860 bool isTailCall,
bool isMustTailCall,
8870 const Value *SwiftErrorVal =
nullptr;
8876 auto *Caller = CB.
getParent()->getParent();
8877 if (Caller->getFnAttribute(
"disable-tail-calls").getValueAsString() ==
8878 "true" && !isMustTailCall)
8885 Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
8894 if (V->getType()->isEmptyTy())
8898 Entry.Node = ArgNode; Entry.Ty = V->getType();
8900 Entry.setAttributes(&CB,
I - CB.
arg_begin());
8912 Args.push_back(Entry);
8916 if (Entry.IsSRet && isa<Instruction>(V))
8924 Value *V = Bundle->Inputs[0];
8926 Entry.Node = ArgNode;
8927 Entry.Ty = V->getType();
8928 Entry.IsCFGuardTarget =
true;
8929 Args.push_back(Entry);
8947 "Target doesn't support calls with kcfi operand bundles.");
8948 CFIType = cast<ConstantInt>(Bundle->Inputs[0]);
8955 auto *Token = Bundle->Inputs[0].get();
8956 ConvControlToken =
getValue(Token);
8974 "This target doesn't support calls with ptrauth operand bundles.");
8978 std::pair<SDValue, SDValue> Result =
lowerInvokable(CLI, EHPadBB);
8980 if (Result.first.getNode()) {
9002 if (
const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
9021 bool ConstantMemory =
false;
9026 ConstantMemory =
true;
9037 if (!ConstantMemory)
9044void SelectionDAGBuilder::processIntegerCallValue(
const Instruction &
I,
9058bool SelectionDAGBuilder::visitMemCmpBCmpCall(
const CallInst &
I) {
9059 const Value *
LHS =
I.getArgOperand(0), *
RHS =
I.getArgOperand(1);
9073 if (Res.first.getNode()) {
9074 processIntegerCallValue(
I, Res.first,
true);
9088 auto hasFastLoadsAndCompare = [&](
unsigned NumBits) {
9111 switch (NumBitsToCompare) {
9123 LoadVT = hasFastLoadsAndCompare(NumBitsToCompare);
9141 processIntegerCallValue(
I, Cmp,
false);
9150bool SelectionDAGBuilder::visitMemChrCall(
const CallInst &
I) {
9151 const Value *Src =
I.getArgOperand(0);
9156 std::pair<SDValue, SDValue> Res =
9160 if (Res.first.getNode()) {
9174bool SelectionDAGBuilder::visitMemPCpyCall(
const CallInst &
I) {
9182 Align Alignment = std::min(DstAlign, SrcAlign);
9191 Root, sdl, Dst, Src,
Size, Alignment,
false,
false,
nullptr,
9195 "** memcpy should not be lowered as TailCall in mempcpy context **");
9213bool SelectionDAGBuilder::visitStrCpyCall(
const CallInst &
I,
bool isStpcpy) {
9214 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
9217 std::pair<SDValue, SDValue> Res =
9222 if (Res.first.getNode()) {
9236bool SelectionDAGBuilder::visitStrCmpCall(
const CallInst &
I) {
9237 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
9240 std::pair<SDValue, SDValue> Res =
9245 if (Res.first.getNode()) {
9246 processIntegerCallValue(
I, Res.first,
true);
9259bool SelectionDAGBuilder::visitStrLenCall(
const CallInst &
I) {
9260 const Value *Arg0 =
I.getArgOperand(0);
9263 std::pair<SDValue, SDValue> Res =
9266 if (Res.first.getNode()) {
9267 processIntegerCallValue(
I, Res.first,
false);
9280bool SelectionDAGBuilder::visitStrNLenCall(
const CallInst &
I) {
9281 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
9284 std::pair<SDValue, SDValue> Res =
9288 if (Res.first.getNode()) {
9289 processIntegerCallValue(
I, Res.first,
false);
9302bool SelectionDAGBuilder::visitUnaryFloatCall(
const CallInst &
I,
9305 if (!
I.onlyReadsMemory())
9309 Flags.copyFMF(cast<FPMathOperator>(
I));
9322bool SelectionDAGBuilder::visitBinaryFloatCall(
const CallInst &
I,
9325 if (!
I.onlyReadsMemory())
9329 Flags.copyFMF(cast<FPMathOperator>(
I));
9338void SelectionDAGBuilder::visitCall(
const CallInst &
I) {
9340 if (
I.isInlineAsm()) {
9348 if (
F->isDeclaration()) {
9350 unsigned IID =
F->getIntrinsicID();
9353 IID =
II->getIntrinsicID(
F);
9356 visitIntrinsicCall(
I, IID);
9365 if (!
I.isNoBuiltin() && !
I.isStrictFP() && !
F->hasLocalLinkage() &&
9371 if (visitMemCmpBCmpCall(
I))
9374 case LibFunc_copysign:
9375 case LibFunc_copysignf:
9376 case LibFunc_copysignl:
9379 if (
I.onlyReadsMemory()) {
9383 LHS.getValueType(), LHS, RHS));
9405 case LibFunc_fminimum_num:
9406 case LibFunc_fminimum_numf:
9407 case LibFunc_fminimum_numl:
9411 case LibFunc_fmaximum_num:
9412 case LibFunc_fmaximum_numf:
9413 case LibFunc_fmaximum_numl:
9454 case LibFunc_atan2f:
9455 case LibFunc_atan2l:
9480 case LibFunc_sqrt_finite:
9481 case LibFunc_sqrtf_finite:
9482 case LibFunc_sqrtl_finite:
9487 case LibFunc_floorf:
9488 case LibFunc_floorl:
9492 case LibFunc_nearbyint:
9493 case LibFunc_nearbyintf:
9494 case LibFunc_nearbyintl:
9511 case LibFunc_roundf:
9512 case LibFunc_roundl:
9517 case LibFunc_truncf:
9518 case LibFunc_truncl:
9535 case LibFunc_exp10f:
9536 case LibFunc_exp10l:
9541 case LibFunc_ldexpf:
9542 case LibFunc_ldexpl:
9546 case LibFunc_memcmp:
9547 if (visitMemCmpBCmpCall(
I))
9550 case LibFunc_mempcpy:
9551 if (visitMemPCpyCall(
I))
9554 case LibFunc_memchr:
9555 if (visitMemChrCall(
I))
9558 case LibFunc_strcpy:
9559 if (visitStrCpyCall(
I,
false))
9562 case LibFunc_stpcpy:
9563 if (visitStrCpyCall(
I,
true))
9566 case LibFunc_strcmp:
9567 if (visitStrCmpCall(
I))
9570 case LibFunc_strlen:
9571 if (visitStrLenCall(
I))
9574 case LibFunc_strnlen:
9575 if (visitStrNLenCall(
I))
9590 assert(!
I.hasOperandBundlesOtherThan(
9591 {LLVMContext::OB_deopt, LLVMContext::OB_funclet,
9592 LLVMContext::OB_cfguardtarget, LLVMContext::OB_preallocated,
9593 LLVMContext::OB_clang_arc_attachedcall, LLVMContext::OB_kcfi,
9594 LLVMContext::OB_convergencectrl}) &&
9595 "Cannot lower calls with arbitrary operand bundles!");
9599 if (
I.hasDeoptState())
9615 const auto *Key = cast<ConstantInt>(PAB->Inputs[0]);
9616 const Value *Discriminator = PAB->Inputs[1];
9618 assert(Key->getType()->isIntegerTy(32) &&
"Invalid ptrauth key");
9619 assert(Discriminator->getType()->isIntegerTy(64) &&
9620 "Invalid ptrauth discriminator");
9624 if (
const auto *CalleeCPA = dyn_cast<ConstantPtrAuth>(CalleeV))
9625 if (CalleeCPA->isKnownCompatibleWith(Key, Discriminator,
9631 assert(!isa<Function>(CalleeV) &&
"invalid direct ptrauth call");
9666 for (
const auto &Code : Codes)
9681 SDISelAsmOperandInfo &MatchingOpInfo,
9683 if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT)
9689 std::pair<unsigned, const TargetRegisterClass *> MatchRC =
9691 OpInfo.ConstraintVT);
9692 std::pair<unsigned, const TargetRegisterClass *> InputRC =
9694 MatchingOpInfo.ConstraintVT);
9695 const bool OutOpIsIntOrFP =
9696 OpInfo.ConstraintVT.isInteger() || OpInfo.ConstraintVT.isFloatingPoint();
9697 const bool InOpIsIntOrFP = MatchingOpInfo.ConstraintVT.isInteger() ||
9698 MatchingOpInfo.ConstraintVT.isFloatingPoint();
9699 if ((OutOpIsIntOrFP != InOpIsIntOrFP) || (MatchRC.second != InputRC.second)) {
9702 " with a matching output constraint of"
9703 " incompatible type!");
9705 MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT;
9712 SDISelAsmOperandInfo &OpInfo,
9725 const Value *OpVal = OpInfo.CallOperandVal;
9726 if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
9727 isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) {
9743 DL.getPrefTypeAlign(Ty),
false,
9746 Chain = DAG.
getTruncStore(Chain, Location, OpInfo.CallOperand, StackSlot,
9749 OpInfo.CallOperand = StackSlot;
9762static std::optional<unsigned>
9764 SDISelAsmOperandInfo &OpInfo,
9765 SDISelAsmOperandInfo &RefOpInfo) {
9776 return std::nullopt;
9780 unsigned AssignedReg;
9783 &
TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
9786 return std::nullopt;
9791 const MVT RegVT = *
TRI.legalclasstypes_begin(*RC);
9793 if (OpInfo.ConstraintVT != MVT::Other && RegVT != MVT::Untyped) {
9802 !
TRI.isTypeLegalForClass(*RC, OpInfo.ConstraintVT)) {
9807 if (RegVT.
getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
9812 OpInfo.CallOperand =
9814 OpInfo.ConstraintVT = RegVT;
9818 }
else if (RegVT.
isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
9821 OpInfo.CallOperand =
9823 OpInfo.ConstraintVT = VT;
9830 if (OpInfo.isMatchingInputConstraint())
9831 return std::nullopt;
9833 EVT ValueVT = OpInfo.ConstraintVT;
9834 if (OpInfo.ConstraintVT == MVT::Other)
9838 unsigned NumRegs = 1;
9839 if (OpInfo.ConstraintVT != MVT::Other)
9854 I = std::find(
I, RC->
end(), AssignedReg);
9855 if (
I == RC->
end()) {
9858 return {AssignedReg};
9862 for (; NumRegs; --NumRegs, ++
I) {
9863 assert(
I != RC->
end() &&
"Ran out of registers to allocate!");
9868 OpInfo.AssignedRegs =
RegsForValue(Regs, RegVT, ValueVT);
9869 return std::nullopt;
9874 const std::vector<SDValue> &AsmNodeOperands) {
9877 for (; OperandNo; --OperandNo) {
9879 unsigned OpFlag = AsmNodeOperands[CurOp]->getAsZExtVal();
9882 (
F.isRegDefKind() ||
F.isRegDefEarlyClobberKind() ||
F.isMemKind()) &&
9883 "Skipped past definitions?");
9884 CurOp +=
F.getNumOperandRegisters() + 1;
9895 explicit ExtraFlags(
const CallBase &Call) {
9897 if (
IA->hasSideEffects())
9899 if (
IA->isAlignStack())
9901 if (
Call.isConvergent())
9922 unsigned get()
const {
return Flags; }
9929 if (
auto *GA = dyn_cast<GlobalAddressSDNode>(
Op)) {
9930 auto Fn = dyn_cast_or_null<Function>(GA->getGlobal());
9945void SelectionDAGBuilder::visitInlineAsm(
const CallBase &Call,
9958 bool HasSideEffect =
IA->hasSideEffects();
9959 ExtraFlags ExtraInfo(Call);
9961 for (
auto &
T : TargetConstraints) {
9962 ConstraintOperands.
push_back(SDISelAsmOperandInfo(
T));
9963 SDISelAsmOperandInfo &OpInfo = ConstraintOperands.
back();
9965 if (OpInfo.CallOperandVal)
9966 OpInfo.CallOperand =
getValue(OpInfo.CallOperandVal);
9969 HasSideEffect = OpInfo.hasMemory(TLI);
9978 OpInfo.CallOperand && !isa<ConstantSDNode>(OpInfo.CallOperand))
9981 return emitInlineAsmError(Call,
"constraint '" +
Twine(
T.ConstraintCode) +
9982 "' expects an integer constant "
9985 ExtraInfo.update(
T);
9992 bool EmitEHLabels = isa<InvokeInst>(Call);
9994 assert(EHPadBB &&
"InvokeInst must have an EHPadBB");
9996 bool IsCallBr = isa<CallBrInst>(Call);
9998 if (IsCallBr || EmitEHLabels) {
10006 if (EmitEHLabels) {
10007 Chain = lowerStartEH(Chain, EHPadBB, BeginLabel);
10012 IA->collectAsmStrs(AsmStrs);
10015 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
10023 if (OpInfo.hasMatchingInput()) {
10024 SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
10055 if (OpInfo.isIndirect &&
isFunction(OpInfo.CallOperand) &&
10058 OpInfo.isIndirect =
false;
10065 !OpInfo.isIndirect) {
10066 assert((OpInfo.isMultipleAlternative ||
10068 "Can only indirectify direct input operands!");
10074 OpInfo.CallOperandVal =
nullptr;
10077 OpInfo.isIndirect =
true;
10083 std::vector<SDValue> AsmNodeOperands;
10084 AsmNodeOperands.push_back(
SDValue());
10091 const MDNode *SrcLoc =
Call.getMetadata(
"srcloc");
10101 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
10103 SDISelAsmOperandInfo &RefOpInfo =
10104 OpInfo.isMatchingInputConstraint()
10105 ? ConstraintOperands[OpInfo.getMatchedOperand()]
10107 const auto RegError =
10112 const char *
RegName =
TRI.getName(*RegError);
10113 emitInlineAsmError(Call,
"register '" +
Twine(
RegName) +
10114 "' allocated for constraint '" +
10115 Twine(OpInfo.ConstraintCode) +
10116 "' does not match required type");
10120 auto DetectWriteToReservedRegister = [&]() {
10123 for (
unsigned Reg : OpInfo.AssignedRegs.Regs) {
10125 TRI.isInlineAsmReadOnlyReg(MF, Reg)) {
10127 emitInlineAsmError(Call,
"write to reserved register '" +
10136 !OpInfo.isMatchingInputConstraint())) &&
10137 "Only address as input operand is allowed.");
10139 switch (OpInfo.Type) {
10145 "Failed to convert memory constraint code to constraint id.");
10149 OpFlags.setMemConstraint(ConstraintID);
10152 AsmNodeOperands.push_back(OpInfo.CallOperand);
10157 if (OpInfo.AssignedRegs.Regs.empty()) {
10158 emitInlineAsmError(
10159 Call,
"couldn't allocate output register for constraint '" +
10160 Twine(OpInfo.ConstraintCode) +
"'");
10164 if (DetectWriteToReservedRegister())
10169 OpInfo.AssignedRegs.AddInlineAsmOperands(
10178 SDValue InOperandVal = OpInfo.CallOperand;
10180 if (OpInfo.isMatchingInputConstraint()) {
10186 if (
Flag.isRegDefKind() ||
Flag.isRegDefEarlyClobberKind()) {
10187 if (OpInfo.isIndirect) {
10189 emitInlineAsmError(Call,
"inline asm not supported yet: "
10190 "don't know how to handle tied "
10191 "indirect register inputs");
10199 auto *
R = cast<RegisterSDNode>(AsmNodeOperands[CurOp+1]);
10201 MVT RegVT =
R->getSimpleValueType(0);
10205 :
TRI.getMinimalPhysRegClass(TiedReg);
10206 for (
unsigned i = 0, e =
Flag.getNumOperandRegisters(); i != e; ++i)
10213 MatchedRegs.getCopyToRegs(InOperandVal,
DAG, dl, Chain, &Glue, &Call);
10215 OpInfo.getMatchedOperand(), dl,
DAG,
10220 assert(
Flag.isMemKind() &&
"Unknown matching constraint!");
10221 assert(
Flag.getNumOperandRegisters() == 1 &&
10222 "Unexpected number of operands");
10225 Flag.clearMemConstraint();
10226 Flag.setMatchingOp(OpInfo.getMatchedOperand());
10229 AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
10240 std::vector<SDValue> Ops;
10245 if (isa<ConstantSDNode>(InOperandVal)) {
10246 emitInlineAsmError(Call,
"value out of range for constraint '" +
10247 Twine(OpInfo.ConstraintCode) +
"'");
10251 emitInlineAsmError(Call,
10252 "invalid operand for inline asm constraint '" +
10253 Twine(OpInfo.ConstraintCode) +
"'");
10266 assert((OpInfo.isIndirect ||
10268 "Operand must be indirect to be a mem!");
10271 "Memory operands expect pointer values");
10276 "Failed to convert memory constraint code to constraint id.");
10280 ResOpType.setMemConstraint(ConstraintID);
10284 AsmNodeOperands.push_back(InOperandVal);
10292 "Failed to convert memory constraint code to constraint id.");
10296 SDValue AsmOp = InOperandVal;
10298 auto *GA = cast<GlobalAddressSDNode>(InOperandVal);
10306 ResOpType.setMemConstraint(ConstraintID);
10308 AsmNodeOperands.push_back(
10311 AsmNodeOperands.push_back(AsmOp);
10317 emitInlineAsmError(Call,
"unknown asm constraint '" +
10318 Twine(OpInfo.ConstraintCode) +
"'");
10323 if (OpInfo.isIndirect) {
10324 emitInlineAsmError(
10325 Call,
"Don't know how to handle indirect register inputs yet "
10326 "for constraint '" +
10327 Twine(OpInfo.ConstraintCode) +
"'");
10332 if (OpInfo.AssignedRegs.Regs.empty()) {
10333 emitInlineAsmError(Call,
10334 "couldn't allocate input reg for constraint '" +
10335 Twine(OpInfo.ConstraintCode) +
"'");
10339 if (DetectWriteToReservedRegister())
10344 OpInfo.AssignedRegs.getCopyToRegs(InOperandVal,
DAG, dl, Chain, &Glue,
10348 0, dl,
DAG, AsmNodeOperands);
10354 if (!OpInfo.AssignedRegs.Regs.empty())
10364 if (Glue.
getNode()) AsmNodeOperands.push_back(Glue);
10368 DAG.
getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
10379 if (
StructType *StructResult = dyn_cast<StructType>(CallResultType))
10380 ResultTypes = StructResult->elements();
10381 else if (!CallResultType->
isVoidTy())
10382 ResultTypes =
ArrayRef(CallResultType);
10384 auto CurResultType = ResultTypes.
begin();
10385 auto handleRegAssign = [&](
SDValue V) {
10386 assert(CurResultType != ResultTypes.
end() &&
"Unexpected value");
10387 assert((*CurResultType)->isSized() &&
"Unexpected unsized type");
10400 if (ResultVT !=
V.getValueType() &&
10403 else if (ResultVT !=
V.getValueType() && ResultVT.
isInteger() &&
10404 V.getValueType().isInteger()) {
10410 assert(ResultVT ==
V.getValueType() &&
"Asm result value mismatch!");
10416 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
10420 if (OpInfo.AssignedRegs.Regs.empty())
10423 switch (OpInfo.ConstraintType) {
10427 Chain, &Glue, &Call);
10439 assert(
false &&
"Unexpected unknown constraint");
10443 if (OpInfo.isIndirect) {
10444 const Value *
Ptr = OpInfo.CallOperandVal;
10445 assert(
Ptr &&
"Expected value CallOperandVal for indirect asm operand");
10451 assert(!
Call.getType()->isVoidTy() &&
"Bad inline asm!");
10454 handleRegAssign(V);
10456 handleRegAssign(Val);
10462 if (!ResultValues.
empty()) {
10463 assert(CurResultType == ResultTypes.
end() &&
10464 "Mismatch in number of ResultTypes");
10466 "Mismatch in number of output operands in asm result");
10474 if (!OutChains.
empty())
10477 if (EmitEHLabels) {
10478 Chain = lowerEndEH(Chain, cast<InvokeInst>(&Call), EHPadBB, BeginLabel);
10482 if (ResultValues.
empty() || HasSideEffect || !OutChains.
empty() || IsCallBr ||
10487void SelectionDAGBuilder::emitInlineAsmError(
const CallBase &Call,
10488 const Twine &Message) {
10497 if (ValueVTs.
empty())
10501 for (
const EVT &VT : ValueVTs)
10507void SelectionDAGBuilder::visitVAStart(
const CallInst &
I) {
10514void SelectionDAGBuilder::visitVAArg(
const VAArgInst &
I) {
10520 DL.getABITypeAlign(
I.getType()).value());
10523 if (
I.getType()->isPointerTy())
10529void SelectionDAGBuilder::visitVAEnd(
const CallInst &
I) {
10536void SelectionDAGBuilder::visitVACopy(
const CallInst &
I) {
10548 std::optional<ConstantRange> CR =
getRange(
I);
10550 if (!CR || CR->isFullSet() || CR->isEmptySet() || CR->isUpperWrapped())
10553 APInt Lo = CR->getUnsignedMin();
10554 if (!
Lo.isMinValue())
10557 APInt Hi = CR->getUnsignedMax();
10558 unsigned Bits = std::max(
Hi.getActiveBits(),
10567 unsigned NumVals =
Op.getNode()->getNumValues();
10574 for (
unsigned I = 1;
I != NumVals; ++
I)
10588 unsigned ArgIdx,
unsigned NumArgs,
SDValue Callee,
Type *ReturnTy,
10591 Args.reserve(NumArgs);
10595 for (
unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs;
10596 ArgI != ArgE; ++ArgI) {
10597 const Value *V = Call->getOperand(ArgI);
10599 assert(!V->getType()->isEmptyTy() &&
"Empty type passed to intrinsic.");
10603 Entry.Ty = V->getType();
10604 Entry.setAttributes(Call, ArgI);
10605 Args.push_back(Entry);
10610 .
setCallee(Call->getCallingConv(), ReturnTy, Callee, std::move(Args),
10639 for (
unsigned I = StartIdx;
I < Call.arg_size();
I++) {
10654void SelectionDAGBuilder::visitStackmap(
const CallInst &CI) {
10688 assert(
ID.getValueType() == MVT::i64);
10719void SelectionDAGBuilder::visitPatchpoint(
const CallBase &CB,
10735 if (
auto* ConstCallee = dyn_cast<ConstantSDNode>(Callee))
10738 else if (
auto* SymbolicCallee = dyn_cast<GlobalAddressSDNode>(Callee))
10740 SDLoc(SymbolicCallee),
10741 SymbolicCallee->getValueType(0));
10751 "Not enough arguments provided to the patchpoint intrinsic");
10754 unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
10772 "Expected a callseq node.");
10774 bool HasGlue =
Call->getGluedNode();
10804 unsigned NumCallRegArgs =
Call->getNumOperands() - (HasGlue ? 4 : 3);
10805 NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
10814 for (
unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i !=
e; ++i)
10825 if (IsAnyRegCC && HasDef) {
10830 assert(ValueVTs.
size() == 1 &&
"Expected only one return value type.");
10854 if (IsAnyRegCC && HasDef) {
10866void SelectionDAGBuilder::visitVectorReduce(
const CallInst &
I,
10867 unsigned Intrinsic) {
10871 if (
I.arg_size() > 1)
10877 if (
auto *FPMO = dyn_cast<FPMathOperator>(&
I))
10880 switch (Intrinsic) {
10881 case Intrinsic::vector_reduce_fadd:
10889 case Intrinsic::vector_reduce_fmul:
10897 case Intrinsic::vector_reduce_add:
10900 case Intrinsic::vector_reduce_mul:
10903 case Intrinsic::vector_reduce_and:
10906 case Intrinsic::vector_reduce_or:
10909 case Intrinsic::vector_reduce_xor:
10912 case Intrinsic::vector_reduce_smax:
10915 case Intrinsic::vector_reduce_smin:
10918 case Intrinsic::vector_reduce_umax:
10921 case Intrinsic::vector_reduce_umin:
10924 case Intrinsic::vector_reduce_fmax:
10927 case Intrinsic::vector_reduce_fmin:
10930 case Intrinsic::vector_reduce_fmaximum:
10933 case Intrinsic::vector_reduce_fminimum:
10947 Attrs.push_back(Attribute::SExt);
10949 Attrs.push_back(Attribute::ZExt);
10951 Attrs.push_back(Attribute::InReg);
10961std::pair<SDValue, SDValue>
10974 RetTys.
swap(OldRetTys);
10975 Offsets.swap(OldOffsets);
10977 for (
size_t i = 0, e = OldRetTys.
size(); i != e; ++i) {
10978 EVT RetVT = OldRetTys[i];
10982 unsigned RegisterVTByteSZ = RegisterVT.
getSizeInBits() / 8;
10983 RetTys.
append(NumRegs, RegisterVT);
10984 for (
unsigned j = 0; j != NumRegs; ++j)
10997 int DemoteStackIdx = -100;
11007 Type *StackSlotPtrType =
11012 Entry.Node = DemoteStackSlot;
11013 Entry.Ty = StackSlotPtrType;
11014 Entry.IsSExt =
false;
11015 Entry.IsZExt =
false;
11016 Entry.IsInReg =
false;
11017 Entry.IsSRet =
true;
11018 Entry.IsNest =
false;
11019 Entry.IsByVal =
false;
11020 Entry.IsByRef =
false;
11021 Entry.IsReturned =
false;
11022 Entry.IsSwiftSelf =
false;
11023 Entry.IsSwiftAsync =
false;
11024 Entry.IsSwiftError =
false;
11025 Entry.IsCFGuardTarget =
false;
11026 Entry.Alignment = Alignment;
11038 for (
unsigned I = 0, E = RetTys.
size();
I != E; ++
I) {
11040 if (NeedsRegBlock) {
11041 Flags.setInConsecutiveRegs();
11042 if (
I == RetTys.
size() - 1)
11043 Flags.setInConsecutiveRegsLast();
11045 EVT VT = RetTys[
I];
11050 for (
unsigned i = 0; i != NumRegs; ++i) {
11052 MyFlags.
Flags = Flags;
11053 MyFlags.
VT = RegisterVT;
11054 MyFlags.
ArgVT = VT;
11059 cast<PointerType>(CLI.
RetTy)->getAddressSpace());
11067 CLI.
Ins.push_back(MyFlags);
11081 CLI.
Ins.push_back(MyFlags);
11089 for (
unsigned i = 0, e = Args.size(); i != e; ++i) {
11093 Type *FinalType = Args[i].Ty;
11094 if (Args[i].IsByVal)
11095 FinalType = Args[i].IndirectType;
11098 for (
unsigned Value = 0, NumValues = ValueVTs.
size();
Value != NumValues;
11103 Args[i].Node.getResNo() +
Value);
11110 Flags.setOrigAlign(OriginalAlignment);
11112 if (Args[i].Ty->isPointerTy()) {
11113 Flags.setPointer();
11114 Flags.setPointerAddrSpace(
11117 if (Args[i].IsZExt)
11119 if (Args[i].IsSExt)
11121 if (Args[i].IsNoExt)
11123 if (Args[i].IsInReg) {
11127 isa<StructType>(FinalType)) {
11130 Flags.setHvaStart();
11136 if (Args[i].IsSRet)
11138 if (Args[i].IsSwiftSelf)
11139 Flags.setSwiftSelf();
11140 if (Args[i].IsSwiftAsync)
11141 Flags.setSwiftAsync();
11142 if (Args[i].IsSwiftError)
11143 Flags.setSwiftError();
11144 if (Args[i].IsCFGuardTarget)
11145 Flags.setCFGuardTarget();
11146 if (Args[i].IsByVal)
11148 if (Args[i].IsByRef)
11150 if (Args[i].IsPreallocated) {
11151 Flags.setPreallocated();
11159 if (Args[i].IsInAlloca) {
11160 Flags.setInAlloca();
11169 if (Args[i].IsByVal || Args[i].IsInAlloca || Args[i].IsPreallocated) {
11170 unsigned FrameSize =
DL.getTypeAllocSize(Args[i].IndirectType);
11171 Flags.setByValSize(FrameSize);
11174 if (
auto MA = Args[i].Alignment)
11178 }
else if (
auto MA = Args[i].Alignment) {
11181 MemAlign = OriginalAlignment;
11183 Flags.setMemAlign(MemAlign);
11184 if (Args[i].IsNest)
11187 Flags.setInConsecutiveRegs();
11196 if (Args[i].IsSExt)
11198 else if (Args[i].IsZExt)
11203 if (Args[i].IsReturned && !
Op.getValueType().isVector() &&
11208 Args[i].Ty->getPointerAddressSpace())) &&
11209 RetTys.
size() == NumValues &&
"unexpected use of 'returned'");
11222 CLI.
RetZExt == Args[i].IsZExt))
11223 Flags.setReturned();
11229 for (
unsigned j = 0; j != NumParts; ++j) {
11236 j * Parts[j].getValueType().getStoreSize().getKnownMinValue());
11237 if (NumParts > 1 && j == 0)
11241 if (j == NumParts - 1)
11245 CLI.
Outs.push_back(MyFlags);
11246 CLI.
OutVals.push_back(Parts[j]);
11249 if (NeedsRegBlock &&
Value == NumValues - 1)
11250 CLI.
Outs[CLI.
Outs.size() - 1].Flags.setInConsecutiveRegsLast();
11262 "LowerCall didn't return a valid chain!");
11264 "LowerCall emitted a return value for a tail call!");
11266 "LowerCall didn't emit the correct number of values!");
11278 for (
unsigned i = 0, e = CLI.
Ins.size(); i != e; ++i) {
11279 assert(InVals[i].
getNode() &&
"LowerCall emitted a null value!");
11280 assert(
EVT(CLI.
Ins[i].VT) == InVals[i].getValueType() &&
11281 "LowerCall emitted a value with the wrong type!");
11291 unsigned NumValues = RetTys.
size();
11292 ReturnValues.
resize(NumValues);
11299 for (
unsigned i = 0; i < NumValues; ++i) {
11307 DemoteStackIdx, Offsets[i]),
11309 ReturnValues[i] = L;
11310 Chains[i] = L.getValue(1);
11317 std::optional<ISD::NodeType> AssertOp;
11322 unsigned CurReg = 0;
11323 for (
EVT VT : RetTys) {
11330 CLI.
DAG, CLI.
DL, &InVals[CurReg], NumRegs, RegisterVT, VT,
nullptr,
11338 if (ReturnValues.
empty())
11344 return std::make_pair(Res, CLI.
Chain);
11361 if (
N->getNumValues() == 1) {
11369 "Lowering returned the wrong number of results!");
11372 for (
unsigned I = 0, E =
N->getNumValues();
I != E; ++
I)
11385 cast<RegisterSDNode>(
Op.getOperand(1))->getReg() != Reg) &&
11386 "Copy from a reg to the same reg!");
11400 ExtendType = PreferredExtendIt->second;
11403 PendingExports.push_back(Chain);
11415 return A->use_empty();
11417 const BasicBlock &Entry =
A->getParent()->front();
11418 for (
const User *U :
A->users())
11419 if (cast<Instruction>(U)->
getParent() != &Entry || isa<SwitchInst>(U))
11427 std::pair<const AllocaInst *, const StoreInst *>>;
11439 enum StaticAllocaInfo {
Unknown, Clobbered, Elidable };
11441 unsigned NumArgs = FuncInfo->
Fn->
arg_size();
11442 StaticAllocas.
reserve(NumArgs * 2);
11444 auto GetInfoIfStaticAlloca = [&](
const Value *V) -> StaticAllocaInfo * {
11447 V = V->stripPointerCasts();
11448 const auto *AI = dyn_cast<AllocaInst>(V);
11449 if (!AI || !AI->isStaticAlloca() || !FuncInfo->
StaticAllocaMap.count(AI))
11452 return &Iter.first->second;
11462 const auto *SI = dyn_cast<StoreInst>(&
I);
11469 if (
I.isDebugOrPseudoInst())
11473 for (
const Use &U :
I.operands()) {
11474 if (StaticAllocaInfo *
Info = GetInfoIfStaticAlloca(U))
11475 *
Info = StaticAllocaInfo::Clobbered;
11481 if (StaticAllocaInfo *
Info = GetInfoIfStaticAlloca(SI->getValueOperand()))
11482 *
Info = StaticAllocaInfo::Clobbered;
11485 const Value *Dst = SI->getPointerOperand()->stripPointerCasts();
11486 StaticAllocaInfo *
Info = GetInfoIfStaticAlloca(Dst);
11489 const AllocaInst *AI = cast<AllocaInst>(Dst);
11492 if (*
Info != StaticAllocaInfo::Unknown)
11500 const Value *Val = SI->getValueOperand()->stripPointerCasts();
11501 const auto *Arg = dyn_cast<Argument>(Val);
11502 if (!Arg || Arg->hasPassPointeeByValueCopyAttr() ||
11503 Arg->getType()->isEmptyTy() ||
11504 DL.getTypeStoreSize(Arg->getType()) !=
11506 !
DL.typeSizeEqualsStoreSize(Arg->getType()) ||
11507 ArgCopyElisionCandidates.
count(Arg)) {
11508 *
Info = StaticAllocaInfo::Clobbered;
11512 LLVM_DEBUG(
dbgs() <<
"Found argument copy elision candidate: " << *AI
11516 *
Info = StaticAllocaInfo::Elidable;
11517 ArgCopyElisionCandidates.
insert({Arg, {AI, SI}});
11522 if (ArgCopyElisionCandidates.
size() == NumArgs)
11536 auto *LNode = dyn_cast<LoadSDNode>(ArgVals[0]);
11539 auto *FINode = dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode());
11546 auto ArgCopyIter = ArgCopyElisionCandidates.
find(&Arg);
11547 assert(ArgCopyIter != ArgCopyElisionCandidates.
end());
11548 const AllocaInst *AI = ArgCopyIter->second.first;
11549 int FixedIndex = FINode->getIndex();
11551 int OldIndex = AllocaIndex;
11555 dbgs() <<
" argument copy elision failed due to bad fixed stack "
11561 LLVM_DEBUG(
dbgs() <<
" argument copy elision failed: alignment of alloca "
11562 "greater than stack argument alignment ("
11563 <<
DebugStr(RequiredAlignment) <<
" vs "
11571 dbgs() <<
"Eliding argument copy from " << Arg <<
" to " << *AI <<
'\n'
11572 <<
" Replacing frame index " << OldIndex <<
" with " << FixedIndex
11578 AllocaIndex = FixedIndex;
11579 ArgCopyElisionFrameIndexMap.
insert({OldIndex, FixedIndex});
11580 for (
SDValue ArgVal : ArgVals)
11584 const StoreInst *SI = ArgCopyIter->second.second;
11585 ElidedArgCopyInstrs.
insert(SI);
11597void SelectionDAGISel::LowerArguments(
const Function &
F) {
11604 if (
F.hasFnAttribute(Attribute::Naked))
11616 Ins.push_back(RetArg);
11624 ArgCopyElisionCandidates);
11628 unsigned ArgNo = Arg.getArgNo();
11631 bool isArgValueUsed = !Arg.use_empty();
11632 unsigned PartBase = 0;
11633 Type *FinalType = Arg.getType();
11634 if (Arg.hasAttribute(Attribute::ByVal))
11635 FinalType = Arg.getParamByValType();
11637 FinalType,
F.getCallingConv(),
F.isVarArg(),
DL);
11638 for (
unsigned Value = 0, NumValues = ValueVTs.
size();
11645 if (Arg.getType()->isPointerTy()) {
11646 Flags.setPointer();
11647 Flags.setPointerAddrSpace(
11648 cast<PointerType>(Arg.getType())->getAddressSpace());
11650 if (Arg.hasAttribute(Attribute::ZExt))
11652 if (Arg.hasAttribute(Attribute::SExt))
11654 if (Arg.hasAttribute(Attribute::InReg)) {
11658 isa<StructType>(Arg.getType())) {
11661 Flags.setHvaStart();
11667 if (Arg.hasAttribute(Attribute::StructRet))
11669 if (Arg.hasAttribute(Attribute::SwiftSelf))
11670 Flags.setSwiftSelf();
11671 if (Arg.hasAttribute(Attribute::SwiftAsync))
11672 Flags.setSwiftAsync();
11673 if (Arg.hasAttribute(Attribute::SwiftError))
11674 Flags.setSwiftError();
11675 if (Arg.hasAttribute(Attribute::ByVal))
11677 if (Arg.hasAttribute(Attribute::ByRef))
11679 if (Arg.hasAttribute(Attribute::InAlloca)) {
11680 Flags.setInAlloca();
11688 if (Arg.hasAttribute(Attribute::Preallocated)) {
11689 Flags.setPreallocated();
11701 const Align OriginalAlignment(
11703 Flags.setOrigAlign(OriginalAlignment);
11706 Type *ArgMemTy =
nullptr;
11707 if (
Flags.isByVal() ||
Flags.isInAlloca() ||
Flags.isPreallocated() ||
11710 ArgMemTy = Arg.getPointeeInMemoryValueType();
11712 uint64_t MemSize =
DL.getTypeAllocSize(ArgMemTy);
11717 if (
auto ParamAlign = Arg.getParamStackAlign())
11718 MemAlign = *ParamAlign;
11719 else if ((ParamAlign = Arg.getParamAlign()))
11720 MemAlign = *ParamAlign;
11723 if (
Flags.isByRef())
11724 Flags.setByRefSize(MemSize);
11726 Flags.setByValSize(MemSize);
11727 }
else if (
auto ParamAlign = Arg.getParamStackAlign()) {
11728 MemAlign = *ParamAlign;
11730 MemAlign = OriginalAlignment;
11732 Flags.setMemAlign(MemAlign);
11734 if (Arg.hasAttribute(Attribute::Nest))
11737 Flags.setInConsecutiveRegs();
11738 if (ArgCopyElisionCandidates.
count(&Arg))
11739 Flags.setCopyElisionCandidate();
11740 if (Arg.hasAttribute(Attribute::Returned))
11741 Flags.setReturned();
11747 for (
unsigned i = 0; i != NumRegs; ++i) {
11752 Flags, RegisterVT, VT, isArgValueUsed, ArgNo,
11754 if (NumRegs > 1 && i == 0)
11755 MyFlags.Flags.setSplit();
11758 MyFlags.Flags.setOrigAlign(
Align(1));
11759 if (i == NumRegs - 1)
11760 MyFlags.Flags.setSplitEnd();
11762 Ins.push_back(MyFlags);
11764 if (NeedsRegBlock &&
Value == NumValues - 1)
11765 Ins[
Ins.size() - 1].Flags.setInConsecutiveRegsLast();
11773 DAG.
getRoot(),
F.getCallingConv(),
F.isVarArg(), Ins, dl, DAG, InVals);
11777 "LowerFormalArguments didn't return a valid chain!");
11779 "LowerFormalArguments didn't emit the correct number of values!");
11781 for (
unsigned i = 0, e =
Ins.size(); i != e; ++i) {
11782 assert(InVals[i].getNode() &&
11783 "LowerFormalArguments emitted a null value!");
11784 assert(EVT(Ins[i].VT) == InVals[i].getValueType() &&
11785 "LowerFormalArguments emitted a value with the wrong type!");
11799 std::optional<ISD::NodeType> AssertOp;
11802 F.getCallingConv(), AssertOp);
11808 FuncInfo->DemoteRegister = SRetReg;
11810 SDB->DAG.getCopyToReg(NewRoot,
SDB->getCurSDLoc(), SRetReg, ArgValue);
11823 unsigned NumValues = ValueVTs.
size();
11824 if (NumValues == 0)
11827 bool ArgHasUses = !Arg.use_empty();
11831 if (Ins[i].
Flags.isCopyElisionCandidate()) {
11832 unsigned NumParts = 0;
11833 for (
EVT VT : ValueVTs)
11835 F.getCallingConv(), VT);
11839 ArrayRef(&InVals[i], NumParts), ArgHasUses);
11844 bool isSwiftErrorArg =
11846 Arg.hasAttribute(Attribute::SwiftError);
11847 if (!ArgHasUses && !isSwiftErrorArg) {
11848 SDB->setUnusedArgValue(&Arg, InVals[i]);
11852 dyn_cast<FrameIndexSDNode>(InVals[i].
getNode()))
11853 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11856 for (
unsigned Val = 0; Val != NumValues; ++Val) {
11857 EVT VT = ValueVTs[Val];
11859 F.getCallingConv(), VT);
11866 if (ArgHasUses || isSwiftErrorArg) {
11867 std::optional<ISD::NodeType> AssertOp;
11868 if (Arg.hasAttribute(Attribute::SExt))
11870 else if (Arg.hasAttribute(Attribute::ZExt))
11874 PartVT, VT,
nullptr, NewRoot,
11875 F.getCallingConv(), AssertOp));
11882 if (ArgValues.
empty())
11887 dyn_cast<FrameIndexSDNode>(ArgValues[0].
getNode()))
11888 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11891 SDB->getCurSDLoc());
11893 SDB->setValue(&Arg, Res);
11906 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
11907 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11917 if (
Reg.isVirtual())
11929 if (
Reg.isVirtual()) {
11935 FuncInfo->InitializeRegForValue(&Arg);
11936 SDB->CopyToExportRegsIfNeeded(&Arg);
11940 if (!Chains.
empty()) {
11947 assert(i == InVals.
size() &&
"Argument register count mismatch!");
11951 if (!ArgCopyElisionFrameIndexMap.
empty()) {
11954 auto I = ArgCopyElisionFrameIndexMap.
find(
VI.getStackSlot());
11955 if (
I != ArgCopyElisionFrameIndexMap.
end())
11956 VI.updateStackSlot(
I->second);
11971SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(
const BasicBlock *LLVMBB) {
11979 if (!isa<PHINode>(SuccBB->begin()))
continue;
11984 if (!SuccsHandled.
insert(SuccMBB).second)
11992 for (
const PHINode &PN : SuccBB->phis()) {
11994 if (PN.use_empty())
11998 if (PN.getType()->isEmptyTy())
12002 const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
12004 if (
const auto *
C = dyn_cast<Constant>(PHIOp)) {
12011 if (
auto *CI = dyn_cast<ConstantInt>(
C))
12023 assert(isa<AllocaInst>(PHIOp) &&
12025 "Didn't codegen value into a register!??");
12035 for (
EVT VT : ValueVTs) {
12037 for (
unsigned i = 0; i != NumRegisters; ++i)
12039 std::make_pair(&*
MBBI++, Reg + i));
12040 Reg += NumRegisters;
12060void SelectionDAGBuilder::updateDAGForMaybeTailCall(
SDValue MaybeTC) {
12062 if (MaybeTC.
getNode() !=
nullptr)
12077 unsigned Size =
W.LastCluster -
W.FirstCluster + 1;
12081 if (
Size == 2 &&
W.MBB == SwitchMBB) {
12094 const APInt &SmallValue =
Small.Low->getValue();
12095 const APInt &BigValue =
Big.Low->getValue();
12098 APInt CommonBit = BigValue ^ SmallValue;
12113 addSuccessorWithProb(SwitchMBB,
Small.MBB,
Small.Prob +
Big.Prob);
12115 addSuccessorWithProb(
12116 SwitchMBB, DefaultMBB,
12120 addSuccessorWithProb(SwitchMBB, DefaultMBB);
12143 return a.Prob != b.Prob ?
12145 a.Low->getValue().slt(b.Low->getValue());
12152 if (
I->Prob >
W.LastCluster->Prob)
12154 if (
I->Kind ==
CC_Range &&
I->MBB == NextMBB) {
12165 UnhandledProbs +=
I->Prob;
12169 bool FallthroughUnreachable =
false;
12171 if (
I ==
W.LastCluster) {
12173 Fallthrough = DefaultMBB;
12174 FallthroughUnreachable = isa<UnreachableInst>(
12178 CurMF->
insert(BBI, Fallthrough);
12182 UnhandledProbs -=
I->Prob;
12192 CurMF->
insert(BBI, JumpMBB);
12194 auto JumpProb =
I->Prob;
12195 auto FallthroughProb = UnhandledProbs;
12203 if (*SI == DefaultMBB) {
12204 JumpProb += DefaultProb / 2;
12205 FallthroughProb -= DefaultProb / 2;
12223 if (FallthroughUnreachable) {
12230 addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
12231 addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
12237 JT->Default = Fallthrough;
12240 if (CurMBB == SwitchMBB) {
12263 BTB->
Prob += DefaultProb / 2;
12267 if (FallthroughUnreachable)
12271 if (CurMBB == SwitchMBB) {
12280 if (
I->Low ==
I->High) {
12295 if (FallthroughUnreachable)
12299 CaseBlock CB(
CC, LHS, RHS, MHS,
I->MBB, Fallthrough, CurMBB,
12302 if (CurMBB == SwitchMBB)
12305 SL->SwitchCases.push_back(CB);
12310 CurMBB = Fallthrough;
12314void SelectionDAGBuilder::splitWorkItem(
SwitchWorkList &WorkList,
12318 assert(
W.FirstCluster->Low->getValue().slt(
W.LastCluster->Low->getValue()) &&
12319 "Clusters not sorted?");
12320 assert(
W.LastCluster -
W.FirstCluster + 1 >= 2 &&
"Too small to split!");
12322 auto [LastLeft, FirstRight, LeftProb, RightProb] =
12323 SL->computeSplitWorkItemInfo(W);
12328 assert(PivotCluster >
W.FirstCluster);
12329 assert(PivotCluster <=
W.LastCluster);
12344 if (FirstLeft == LastLeft && FirstLeft->Kind ==
CC_Range &&
12345 FirstLeft->Low ==
W.GE &&
12346 (FirstLeft->High->getValue() + 1LL) == Pivot->
getValue()) {
12347 LeftMBB = FirstLeft->MBB;
12352 {LeftMBB, FirstLeft, LastLeft,
W.GE, Pivot,
W.DefaultProb / 2});
12361 if (FirstRight == LastRight && FirstRight->Kind ==
CC_Range &&
12362 W.LT && (FirstRight->High->getValue() + 1ULL) ==
W.LT->getValue()) {
12363 RightMBB = FirstRight->MBB;
12368 {RightMBB, FirstRight, LastRight, Pivot,
W.LT,
W.DefaultProb / 2});
12377 if (
W.MBB == SwitchMBB)
12380 SL->SwitchCases.push_back(CB);
12413 unsigned PeeledCaseIndex = 0;
12414 bool SwitchPeeled =
false;
12415 for (
unsigned Index = 0;
Index < Clusters.size(); ++
Index) {
12417 if (
CC.Prob < TopCaseProb)
12419 TopCaseProb =
CC.Prob;
12420 PeeledCaseIndex =
Index;
12421 SwitchPeeled =
true;
12426 LLVM_DEBUG(
dbgs() <<
"Peeled one top case in switch stmt, prob: "
12427 << TopCaseProb <<
"\n");
12437 auto PeeledCaseIt = Clusters.begin() + PeeledCaseIndex;
12439 nullptr,
nullptr, TopCaseProb.
getCompl()};
12440 lowerWorkItem(W,
SI.getCondition(), SwitchMBB, PeeledSwitchMBB);
12442 Clusters.erase(PeeledCaseIt);
12445 dbgs() <<
"Scale the probablity for one cluster, before scaling: "
12446 <<
CC.Prob <<
"\n");
12450 PeeledCaseProb = TopCaseProb;
12451 return PeeledSwitchMBB;
12454void SelectionDAGBuilder::visitSwitch(
const SwitchInst &SI) {
12458 Clusters.reserve(
SI.getNumCases());
12459 for (
auto I :
SI.cases()) {
12478 peelDominantCaseCluster(SI, Clusters, PeeledCaseProb);
12482 if (Clusters.empty()) {
12483 assert(PeeledSwitchMBB == SwitchMBB);
12485 if (DefaultMBB != NextBlock(SwitchMBB)) {
12494 SL->findBitTestClusters(Clusters, &SI);
12497 dbgs() <<
"Case clusters: ";
12504 C.Low->getValue().print(
dbgs(),
true);
12505 if (
C.Low !=
C.High) {
12507 C.High->getValue().print(
dbgs(),
true);
12514 assert(!Clusters.empty());
12518 auto DefaultProb = getEdgeProbability(PeeledSwitchMBB, DefaultMBB);
12525 {PeeledSwitchMBB,
First,
Last,
nullptr,
nullptr, DefaultProb});
12527 while (!WorkList.
empty()) {
12529 unsigned NumClusters =
W.LastCluster -
W.FirstCluster + 1;
12534 splitWorkItem(WorkList, W,
SI.getCondition(), SwitchMBB);
12538 lowerWorkItem(W,
SI.getCondition(), SwitchMBB, DefaultMBB);
12542void SelectionDAGBuilder::visitStepVector(
const CallInst &
I) {
12549void SelectionDAGBuilder::visitVectorReverse(
const CallInst &
I) {
12555 assert(VT ==
V.getValueType() &&
"Malformed vector.reverse!");
12566 for (
unsigned i = 0; i != NumElts; ++i)
12567 Mask.push_back(NumElts - 1 - i);
12572void SelectionDAGBuilder::visitVectorDeinterleave(
const CallInst &
I) {
12603void SelectionDAGBuilder::visitVectorInterleave(
const CallInst &
I) {
12628void SelectionDAGBuilder::visitFreeze(
const FreezeInst &
I) {
12632 unsigned NumValues = ValueVTs.
size();
12633 if (NumValues == 0)
return;
12638 for (
unsigned i = 0; i != NumValues; ++i)
12646void SelectionDAGBuilder::visitVectorSplice(
const CallInst &
I) {
12653 int64_t
Imm = cast<ConstantInt>(
I.getOperand(2))->getSExtValue();
12670 for (
unsigned i = 0; i < NumElts; ++i)
12699 assert(
MI->getOpcode() == TargetOpcode::COPY &&
12700 "start of copy chain MUST be COPY");
12701 Reg =
MI->getOperand(1).getReg();
12702 MI =
MRI.def_begin(Reg)->getParent();
12704 if (
MI->getOpcode() == TargetOpcode::COPY) {
12705 assert(Reg.isVirtual() &&
"expected COPY of virtual register");
12706 Reg =
MI->getOperand(1).getReg();
12707 assert(Reg.isPhysical() &&
"expected COPY of physical register");
12708 MI =
MRI.def_begin(Reg)->getParent();
12711 assert(
MI->getOpcode() == TargetOpcode::INLINEASM_BR &&
12712 "end of copy chain MUST be INLINEASM_BR");
12720void SelectionDAGBuilder::visitCallBrLandingPad(
const CallInst &
I) {
12724 cast<CallBrInst>(
I.getParent()->getUniquePredecessor()->getTerminator());
12736 for (
auto &
T : TargetConstraints) {
12737 SDISelAsmOperandInfo OpInfo(
T);
12745 switch (OpInfo.ConstraintType) {
12753 for (
Register &Reg : OpInfo.AssignedRegs.Regs) {
12761 SDValue V = OpInfo.AssignedRegs.getCopyFromRegs(
12764 ResultVTs.
push_back(OpInfo.ConstraintVT);
12773 ResultVTs.
push_back(OpInfo.ConstraintVT);
unsigned const MachineRegisterInfo * MRI
static unsigned getIntrinsicID(const SDNode *N)
static msgpack::DocNode getNode(msgpack::DocNode DN, msgpack::Type Type, MCValue Val)
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
Function Alias Analysis Results
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
This file implements the BitVector class.
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
Analysis containing CSE Info
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI)
Returns an AttributeList representing the attributes applied to the return value of the given call.
const HexagonInstrInfo * TII
static bool isUndef(ArrayRef< int > Mask)
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
static void getRegistersForValue(MachineFunction &MF, MachineIRBuilder &MIRBuilder, GISelAsmOperandInfo &OpInfo, GISelAsmOperandInfo &RefOpInfo)
Assign virtual/physical registers for the specified register operand.
This file defines an InstructionCost class that is used when calculating the cost of an instruction,...
static std::optional< ConstantRange > getRange(Value *V, const InstrInfoQuery &IIQ)
Helper method to get range from metadata or attribute.
unsigned const TargetRegisterInfo * TRI
static const Function * getCalledFunction(const Value *V)
This file provides utility analysis objects describing memory locations.
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static unsigned getAddressSpace(const Value *V, unsigned MaxLookup)
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static Type * getValueType(Value *V)
Returns the type of the given value/instruction V.
static bool hasOnlySelectUsers(const Value *Cond)
static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL, SDValue &Chain)
Create a LOAD_STACK_GUARD node, and let it carry the target specific global variable if there exists ...
static void addStackMapLiveVars(const CallBase &Call, unsigned StartIdx, const SDLoc &DL, SmallVectorImpl< SDValue > &Ops, SelectionDAGBuilder &Builder)
Add a stack map intrinsic call's live variable operands to a stackmap or patchpoint target node's ope...
static const unsigned MaxParallelChains
static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
visitPow - Lower a pow intrinsic.
static bool getUniformBase(const Value *Ptr, SDValue &Base, SDValue &Index, ISD::MemIndexType &IndexType, SDValue &Scale, SelectionDAGBuilder *SDB, const BasicBlock *CurBB, uint64_t ElemSize)
static const CallBase * FindPreallocatedCall(const Value *PreallocatedSetup)
Given a @llvm.call.preallocated.setup, return the corresponding preallocated call.
static cl::opt< unsigned > SwitchPeelThreshold("switch-peel-threshold", cl::Hidden, cl::init(66), cl::desc("Set the case probability threshold for peeling the case from a " "switch statement. A value greater than 100 will void this " "optimization"))
static cl::opt< bool > InsertAssertAlign("insert-assert-align", cl::init(true), cl::desc("Insert the experimental `assertalign` node."), cl::ReallyHidden)
static unsigned getISDForVPIntrinsic(const VPIntrinsic &VPIntrin)
static bool handleDanglingVariadicDebugInfo(SelectionDAG &DAG, DILocalVariable *Variable, DebugLoc DL, unsigned Order, SmallVectorImpl< Value * > &Values, DIExpression *Expression)
static unsigned findMatchingInlineAsmOperand(unsigned OperandNo, const std::vector< SDValue > &AsmNodeOperands)
static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo, SDISelAsmOperandInfo &MatchingOpInfo, SelectionDAG &DAG)
Make sure that the output operand OpInfo and its corresponding input operand MatchingOpInfo have comp...
static void findUnwindDestinations(FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB, BranchProbability Prob, SmallVectorImpl< std::pair< MachineBasicBlock *, BranchProbability > > &UnwindDests)
When an invoke or a cleanupret unwinds to the next EH pad, there are many places it could ultimately ...
static unsigned FixedPointIntrinsicToOpcode(unsigned Intrinsic)
static BranchProbability scaleCaseProbality(BranchProbability CaseProb, BranchProbability PeeledCaseProb)
static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandExp2 - Lower an exp2 intrinsic.
static SDValue expandDivFix(unsigned Opcode, const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue Scale, SelectionDAG &DAG, const TargetLowering &TLI)
static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt, const SDLoc &dl)
getF32Constant - Get 32-bit floating point constant.
static SDValue widenVectorToPartType(SelectionDAG &DAG, SDValue Val, const SDLoc &DL, EVT PartVT)
static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog10 - Lower a log10 intrinsic.
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, const Value *V, std::optional< CallingConv::ID > CallConv)
getCopyToPartsVector - Create a series of nodes that contain the specified value split into legal par...
static void getUnderlyingArgRegs(SmallVectorImpl< std::pair< Register, TypeSize > > &Regs, const SDValue &N)
static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, const Value *V, std::optional< CallingConv::ID > CallConv=std::nullopt, ISD::NodeType ExtendKind=ISD::ANY_EXTEND)
getCopyToParts - Create a series of nodes that contain the specified value split into legal parts.
static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT, SelectionDAGBuilder &Builder)
static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog2 - Lower a log2 intrinsic.
static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location, SDISelAsmOperandInfo &OpInfo, SelectionDAG &DAG)
Get a direct memory input to behave well as an indirect operand.
static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel)
isOnlyUsedInEntryBlock - If the specified argument is only used in the entry block,...
static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V, const Twine &ErrMsg)
static bool collectInstructionDeps(SmallMapVector< const Instruction *, bool, 8 > *Deps, const Value *V, SmallMapVector< const Instruction *, bool, 8 > *Necessary=nullptr, unsigned Depth=0)
static void findArgumentCopyElisionCandidates(const DataLayout &DL, FunctionLoweringInfo *FuncInfo, ArgCopyElisionMapTy &ArgCopyElisionCandidates)
Scan the entry block of the function in FuncInfo for arguments that look like copies into a local all...
static bool isFunction(SDValue Op)
static SDValue GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI, const SDLoc &dl)
GetExponent - Get the exponent:
static Register FollowCopyChain(MachineRegisterInfo &MRI, Register Reg)
static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS, SelectionDAG &DAG)
ExpandPowI - Expand a llvm.powi intrinsic.
static void findWasmUnwindDestinations(FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB, BranchProbability Prob, SmallVectorImpl< std::pair< MachineBasicBlock *, BranchProbability > > &UnwindDests)
static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog - Lower a log intrinsic.
static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V, SDValue InChain, std::optional< CallingConv::ID > CC=std::nullopt, std::optional< ISD::NodeType > AssertOp=std::nullopt)
getCopyFromParts - Create a value that contains the specified legal parts combined into the value the...
static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl, SelectionDAG &DAG)
static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl)
GetSignificand - Get the significand and build it into a floating-point number with exponent of 1:
static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandExp - Lower an exp intrinsic.
static const MDNode * getRangeMetadata(const Instruction &I)
static cl::opt< unsigned, true > LimitFPPrecision("limit-float-precision", cl::desc("Generate low-precision inline sequences " "for some float libcalls"), cl::location(LimitFloatPrecision), cl::Hidden, cl::init(0))
static void tryToElideArgumentCopy(FunctionLoweringInfo &FuncInfo, SmallVectorImpl< SDValue > &Chains, DenseMap< int, int > &ArgCopyElisionFrameIndexMap, SmallPtrSetImpl< const Instruction * > &ElidedArgCopyInstrs, ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg, ArrayRef< SDValue > ArgVals, bool &ArgHasUses)
Try to elide argument copies from memory into a local alloca.
static unsigned LimitFloatPrecision
LimitFloatPrecision - Generate low-precision inline sequences for some float libcalls (6,...
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V, SDValue InChain, std::optional< CallingConv::ID > CC)
getCopyFromPartsVector - Create a value that contains the specified legal parts combined into the val...
static bool InBlock(const Value *V, const BasicBlock *BB)
static LLVM_ATTRIBUTE_ALWAYS_INLINE MVT::SimpleValueType getSimpleVT(const unsigned char *MatcherTable, unsigned &MatcherIndex)
getSimpleVT - Decode a value in MatcherTable, if it's a VBR encoded value, use GetVBR to decode it.
This file defines the SmallPtrSet class.
This file defines the SmallSet class.
static SymbolRef::Type getType(const Symbol *Sym)
support::ulittle16_t & Lo
support::ulittle16_t & Hi
bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal=false)
Checks whether the given location points to constant memory, or if OrLocal is true whether it points ...
Class for arbitrary precision integers.
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
an instruction to allocate memory on the stack
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
This class represents an incoming formal argument to a Function.
bool hasAttribute(Attribute::AttrKind Kind) const
Check if an argument has a given attribute.
unsigned getArgNo() const
Return the index of this formal argument in its containing function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
A cache of @llvm.assume calls within a function.
An instruction that atomically checks whether a specified value is in a memory location,...
This class represents the atomic memcpy intrinsic i.e.
an instruction that atomically reads a memory location, combines it with another value,...
@ USubCond
Subtract only if no unsigned overflow.
@ Min
*p = old <signed v ? old : v
@ USubSat
*p = usub.sat(old, v) usub.sat matches the behavior of llvm.usub.sat.
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
@ UMax
*p = old >unsigned v ? old : v
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
@ UDecWrap
Decrement one until a minimum value or zero.
static AttributeList get(LLVMContext &C, ArrayRef< std::pair< unsigned, Attribute > > Attrs)
Create an AttributeList with the specified parameters in it.
AttributeSet getRetAttrs() const
The attributes for the ret value are returned.
bool hasFnAttr(Attribute::AttrKind Kind) const
Return true if the attribute exists for the function.
LLVM Basic Block Representation.
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
bool isEntryBlock() const
Return true if this is the entry block of the containing function.
const Function * getParent() const
Return the enclosing method, or null if none.
const Instruction * getFirstNonPHIOrDbg(bool SkipPseudoOp=true) const
Returns a pointer to the first instruction in this block that is not a PHINode or a debug intrinsic,...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
const Instruction & back() const
This class represents a no-op cast from one type to another.
bool test(unsigned Idx) const
void resize(unsigned N, bool t=false)
resize - Grow or shrink the bitvector.
size_type size() const
size - Returns the number of bits in this bitvector.
The address of a basic block.
Conditional or Unconditional Branch instruction.
Analysis providing branch probability information.
BranchProbability getEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors) const
Get an edge's probability, relative to other out-edges of the Src.
bool isEdgeHot(const BasicBlock *Src, const BasicBlock *Dst) const
Test if an edge is hot relative to other out-edges of the Src.
static uint32_t getDenominator()
static BranchProbability getOne()
static BranchProbability getUnknown()
uint32_t getNumerator() const
uint64_t scale(uint64_t Num) const
Scale a large integer.
BranchProbability getCompl() const
static BranchProbability getZero()
static void normalizeProbabilities(ProbabilityIter Begin, ProbabilityIter End)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
CallingConv::ID getCallingConv() const
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
bool isIndirectCall() const
Return true if the callsite is an indirect call.
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
Value * getCalledOperand() const
Value * getArgOperand(unsigned i) const
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
bool isConvergent() const
Determine if the invoke is convergent.
FunctionType * getFunctionType() const
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
bool isTailCall() const
Tests if this call site is marked as a tail call.
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
This class represents a function call, abstracting a target machine's calling convention.
This class is the base class for the comparison instructions.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
ConstantDataSequential - A vector or array constant whose element type is a simple 1/2/4/8-byte integ...
A constant value that is initialized with an expression using other constant values.
static Constant * getBitCast(Constant *C, Type *Ty, bool OnlyIfReduced=false)
ConstantFP - Floating Point Values [float, double].
This is the shared class of boolean and integer constants.
static ConstantInt * getTrue(LLVMContext &Context)
bool isZero() const
This is just a convenience method to make client code smaller for a common case.
static ConstantInt * getFalse(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getValue() const
Return the constant as an APInt value reference.
A signed pointer, in the ptrauth sense.
This class represents a range of values.
uint64_t getZExtValue() const
Constant Vector Declarations.
This is an important base class in LLVM.
This is the common base class for constrained floating point intrinsics.
std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
unsigned getNonMetadataArgCount() const
bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static bool fragmentsOverlap(const FragmentInfo &A, const FragmentInfo &B)
Check if fragments overlap between a pair of FragmentInfos.
static DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
static std::optional< FragmentInfo > getFragmentInfo(expr_op_iterator Start, expr_op_iterator End)
Retrieve the details of this fragment expression.
uint64_t getNumLocationOperands() const
Return the number of unique location operands referred to (via DW_OP_LLVM_arg) in this expression; th...
static std::optional< DIExpression * > createFragmentExpression(const DIExpression *Expr, unsigned OffsetInBits, unsigned SizeInBits)
Create a DIExpression to describe one part of an aggregate variable that is fragmented across multipl...
static const DIExpression * convertToUndefExpression(const DIExpression *Expr)
Removes all elements from Expr that do not apply to an undef debug value, which includes every operat...
static DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
static DIExpression * prependOpcodes(const DIExpression *Expr, SmallVectorImpl< uint64_t > &Ops, bool StackValue=false, bool EntryValue=false)
Prepend DIExpr with the given opcodes and optionally turn it into a stack value.
bool isValidLocationForIntrinsic(const DILocation *DL) const
Check that a location is valid for this variable.
Base class for variables.
std::optional< uint64_t > getSizeInBits() const
Determines the size of the variable's type.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
unsigned getIndexSizeInBits(unsigned AS) const
Size in bits of index used for address calculation in getelementptr.
TypeSize getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type.
Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
This represents the llvm.dbg.label instruction.
DILabel * getLabel() const
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
DebugLoc getDebugLoc() const
This represents the llvm.dbg.value instruction.
iterator_range< location_op_iterator > getValues() const
DILocalVariable * getVariable() const
DIExpression * getExpression() const
bool isKillLocation() const
Record of a variable value-assignment, aka a non instruction representation of the dbg....
LocationType getType() const
DIExpression * getExpression() const
Value * getVariableLocationOp(unsigned OpIdx) const
DILocalVariable * getVariable() const
iterator_range< location_op_iterator > location_ops() const
Get the locations corresponding to the variable referenced by the debug info intrinsic.
DILocation * getInlinedAt() const
iterator find(const_arg_type_t< KeyT > Val)
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
void reserve(size_type NumEntries)
Grow the densemap so that it can contain at least NumEntries items before resizing again.
Diagnostic information for inline asm reporting.
static constexpr ElementCount getFixed(ScalarTy MinVal)
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
constexpr bool isScalar() const
Exactly one element.
Class representing an expression and its matching format.
This instruction compares its operands according to the predicate given to the constructor.
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
bool allowReassoc() const
Flag queries.
An instruction for ordering other memory operations.
static FixedVectorType * get(Type *ElementType, unsigned NumElts)
This class represents a freeze function that returns a random concrete value if an operand is either a ...
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
BranchProbabilityInfo * BPI
Register CreateRegs(const Value *V)
SmallPtrSet< const DbgVariableRecord *, 8 > PreprocessedDVRDeclares
MachineBasicBlock * getMBB(const BasicBlock *BB) const
Register DemoteRegister
DemoteRegister - if CanLowerReturn is false, DemoteRegister is a vreg allocated to hold a pointer to ...
BitVector DescribedArgs
Bitvector with a bit set if corresponding argument is described in ArgDbgValues.
DenseMap< const AllocaInst *, int > StaticAllocaMap
StaticAllocaMap - Keep track of frame indices for fixed sized allocas in the entry block.
int getArgumentFrameIndex(const Argument *A)
getArgumentFrameIndex - Get frame index for the byval argument.
bool isExportedInst(const Value *V) const
isExportedInst - Return true if the specified value is an instruction exported from its block.
const LiveOutInfo * GetLiveOutRegInfo(Register Reg)
GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the register is a PHI destinat...
Register InitializeRegForValue(const Value *V)
unsigned ExceptionPointerVirtReg
If the current MBB is a landing pad, the exception pointer and exception selector registers are copie...
SmallPtrSet< const DbgDeclareInst *, 8 > PreprocessedDbgDeclares
Collection of dbg.declare instructions handled after argument lowering and before ISel proper.
DenseMap< const Value *, Register > ValueMap
ValueMap - Since we emit code for the function a basic block at a time, we must remember which virtua...
MachineBasicBlock::iterator InsertPt
MBB - The current insert position inside the current block.
MachineBasicBlock * MBB
MBB - The current block.
std::vector< std::pair< MachineInstr *, unsigned > > PHINodesToUpdate
PHINodesToUpdate - A list of phi instructions whose operand list will be updated after processing the...
unsigned ExceptionSelectorVirtReg
SmallVector< MachineInstr *, 8 > ArgDbgValues
ArgDbgValues - A list of DBG_VALUE instructions created during isel for function arguments that are i...
unsigned getCurrentCallSite()
Get the call site currently being processed, if any. Return zero if none.
void setCurrentCallSite(unsigned Site)
Set the call site currently being processed.
MachineRegisterInfo * RegInfo
Register CreateReg(MVT VT, bool isDivergent=false)
CreateReg - Allocate a single virtual register for the given type.
bool CanLowerReturn
CanLowerReturn - true iff the function's return value can be lowered to registers.
DenseMap< const Value *, ISD::NodeType > PreferredExtendType
Record the preferred extend type (ISD::SIGN_EXTEND or ISD::ZERO_EXTEND) for a value.
Register getCatchPadExceptionPointerVReg(const Value *CPI, const TargetRegisterClass *RC)
Class to represent function types.
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Type * getParamType(unsigned i) const
Parameter type accessors.
Type * getReturnType() const
Data structure describing the variable locations in a function.
const BasicBlock & getEntryBlock() const
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
bool hasGC() const
hasGC/getGC/setGC/clearGC - The name of the garbage collection algorithm to use during code generatio...
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Constant * getPersonalityFn() const
Get the personality function associated with this function.
AttributeList getAttributes() const
Return the attribute list for this Function.
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Garbage collection metadata for a single function.
void addStackRoot(int Num, const Constant *Metadata)
addStackRoot - Registers a root that lives on the stack.
Represents flags for the getelementptr instruction/expression.
bool hasNoUnsignedSignedWrap() const
bool hasNoUnsignedWrap() const
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1',...
bool hasDLLImportStorageClass() const
Module * getParent()
Get the module that this global value is contained inside of...
This instruction compares its operands according to the predicate given to the constructor.
Indirect Branch Instruction.
This instruction inserts a struct field of array element value into an aggregate value.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
AAMDNodes getAAMetadata() const
Returns the AA metadata for this instruction.
@ MIN_INT_BITS
Minimum number of bits that can be specified.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
void emitError(const Instruction *I, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
The landingpad instruction holds all of the information necessary to generate correct exception handl...
An instruction for reading from memory.
This class is used to represent ISD::LOAD nodes.
static LocationSize precise(uint64_t Value)
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
MCSymbol * createTempSymbol()
Create a temporary symbol with a unique name.
MCSymbol * getOrCreateFrameAllocSymbol(const Twine &FuncName, unsigned Idx)
Gets a symbol that will be defined to the final stack offset of a local variable after codegen.
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
@ INVALID_SIMPLE_VALUE_TYPE
uint64_t getScalarSizeInBits() const
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
ElementCount getVectorElementCount() const
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool bitsGE(MVT VT) const
Return true if this has no less bits than VT.
bool isScalarInteger() const
Return true if this is an integer, not including vectors.
static MVT getVectorVT(MVT VT, unsigned NumElements)
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
void normalizeSuccProbs()
Normalize probabilities of all successors so that the sum of them becomes one.
bool isEHPad() const
Returns true if the block is a landing pad.
void setIsEHCatchretTarget(bool V=true)
Indicates if this is a target block of a catchret.
void setIsCleanupFuncletEntry(bool V=true)
Indicates if this is the entry block of a cleanup funclet.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void setSuccProbability(succ_iterator I, BranchProbability Prob)
Set successor probability of a given iterator.
succ_iterator succ_begin()
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
SmallVectorImpl< MachineBasicBlock * >::iterator succ_iterator
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void setIsEHFuncletEntry(bool V=true)
Indicates if this is the entry block of an EH funclet.
void setIsEHScopeEntry(bool V=true)
Indicates if this is the entry block of an EH scope, i.e., the block that used to have a catchpa...
void setMachineBlockAddressTaken()
Set this block to indicate that its address is used as something other than the target of a terminato...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
bool hasVarSizedObjects() const
This method may be called any time after instruction selection is complete to determine if the stack ...
void setIsImmutableObjectIndex(int ObjectIdx, bool IsImmutable)
Marks the immutability of an object.
int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
void setHasPatchPoint(bool s=true)
void setHasStackMap(bool s=true)
bool hasOpaqueSPAdjustment() const
Returns true if the function contains opaque dynamic stack adjustments.
int getStackProtectorIndex() const
Return the index for the stack protector object.
void setStackProtectorIndex(int I)
void setIsAliasedObjectIndex(int ObjectIdx, bool IsAliased)
Set "maybe pointed to by an LLVM IR value" for an object.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
void RemoveStackObject(int ObjectIdx)
Remove or mark dead a statically sized stack object.
void setFunctionContextIndex(int I)
Description of the location of a variable whose Address is valid and unchanging during function execu...
const WinEHFuncInfo * getWinEHFuncInfo() const
getWinEHFuncInfo - Return information about how the current function uses Windows exception handling.
void setCallsUnwindInit(bool b)
bool useDebugInstrRef() const
Returns true if the function's variable locations are tracked with instruction referencing.
void setCallSiteBeginLabel(MCSymbol *BeginLabel, unsigned Site)
Map the begin label for a call site.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
void setHasEHCatchret(bool V)
void setCallsEHReturn(bool b)
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
unsigned getTypeIDFor(const GlobalValue *TI)
Return the type id for the specified typeinfo. This is function wide.
MCContext & getContext() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
auto getInStackSlotVariableDbgInfo()
Returns the collection of variables for which we have debug info and that have been assigned a stack ...
void addCodeViewAnnotation(MCSymbol *Label, MDNode *MD)
Record annotations associated with a particular label.
Function & getFunction()
Return the LLVM function that this machine code represents.
const MachineBasicBlock & front() const
bool hasEHFunclets() const
void addInvoke(MachineBasicBlock *LandingPad, MCSymbol *BeginLabel, MCSymbol *EndLabel)
Provide the begin and end labels of an invoke style call and associate it with a try landing pad bloc...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void erase(iterator MBBI)
void insert(iterator MBBI, MachineBasicBlock *MBB)
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
Representation of each machine instruction.
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MONonTemporal
The memory access is non-temporal.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
static MachineOperand CreateFI(int Idx)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
ArrayRef< std::pair< MCRegister, Register > > liveins() const
An SDNode that represents everything that will be needed to construct a MachineInstr.
bool contains(const KeyT &Key) const
std::pair< iterator, bool > try_emplace(const KeyT &Key, Ts &&...Args)
Representation for a specific memory location.
static MemoryLocation getAfter(const Value *Ptr, const AAMDNodes &AATags=AAMDNodes())
Return a location that may access any location after Ptr, while remaining within the underlying objec...
A Module instance is used to store all the information related to an LLVM module.
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A udiv or sdiv instruction, which can be marked as "exact", indicating that no bits are destroyed.
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
static constexpr bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
static constexpr bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Resume the propagation of an exception.
Return a value (possibly void), from a function.
Holds the information from a dbg_label node through SDISel.
static SDDbgOperand fromNode(SDNode *Node, unsigned ResNo)
static SDDbgOperand fromFrameIdx(unsigned FrameIdx)
static SDDbgOperand fromVReg(unsigned VReg)
static SDDbgOperand fromConst(const Value *Const)
Holds the information from a dbg_value node through SDISel.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
iterator_range< value_op_iterator > op_values() const
unsigned getIROrder() const
Return the node ordering.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Represents a use of a SDNode.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
SelectionDAGBuilder - This is the common target-independent lowering implementation that is parameter...
SDValue getValue(const Value *V)
getValue - Return an SDValue for the given Value.
void addDanglingDebugInfo(SmallVectorImpl< Value * > &Values, DILocalVariable *Var, DIExpression *Expr, bool IsVariadic, DebugLoc DL, unsigned Order)
Register a dbg_value which relies on a Value which we have not yet seen.
void visitDbgInfo(const Instruction &I)
void clearDanglingDebugInfo()
Clear the dangling debug information map.
void LowerCallTo(const CallBase &CB, SDValue Callee, bool IsTailCall, bool IsMustTailCall, const BasicBlock *EHPadBB=nullptr, const TargetLowering::PtrAuthInfo *PAI=nullptr)
void clear()
Clear out the current SelectionDAG and the associated state and prepare this SelectionDAGBuilder obje...
void visitBitTestHeader(SwitchCG::BitTestBlock &B, MachineBasicBlock *SwitchBB)
visitBitTestHeader - This function emits necessary code to produce value suitable for "bit tests"
void LowerStatepoint(const GCStatepointInst &I, const BasicBlock *EHPadBB=nullptr)
std::unique_ptr< SDAGSwitchLowering > SL
SDValue lowerRangeToAssertZExt(SelectionDAG &DAG, const Instruction &I, SDValue Op)
bool HasTailCall
This is set to true if a call in the current block has been translated as a tail call.
bool ShouldEmitAsBranches(const std::vector< SwitchCG::CaseBlock > &Cases)
If the set of cases should be emitted as a series of branches, return true.
void EmitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB, MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB, BranchProbability TProb, BranchProbability FProb, bool InvertCond)
EmitBranchForMergedCondition - Helper method for FindMergedConditions.
void LowerDeoptimizeCall(const CallInst *CI)
void LowerCallSiteWithDeoptBundle(const CallBase *Call, SDValue Callee, const BasicBlock *EHPadBB)
SwiftErrorValueTracking & SwiftError
Information about the swifterror values used throughout the function.
SDValue getNonRegisterValue(const Value *V)
getNonRegisterValue - Return an SDValue for the given Value, but don't look in FuncInfo....
void CopyValueToVirtualRegister(const Value *V, unsigned Reg, ISD::NodeType ExtendType=ISD::ANY_EXTEND)
DenseMap< MachineBasicBlock *, SmallVector< unsigned, 4 > > LPadToCallSiteMap
Map a landing pad to the call site indexes.
void handleDebugDeclare(Value *Address, DILocalVariable *Variable, DIExpression *Expression, DebugLoc DL)
bool shouldKeepJumpConditionsTogether(const FunctionLoweringInfo &FuncInfo, const BranchInst &I, Instruction::BinaryOps Opc, const Value *Lhs, const Value *Rhs, TargetLoweringBase::CondMergingParams Params) const
StatepointLoweringState StatepointLowering
State used while lowering a statepoint sequence (gc_statepoint, gc_relocate, and gc_result).
void init(GCFunctionInfo *gfi, AAResults *AA, AssumptionCache *AC, const TargetLibraryInfo *li)
void visitBitTestCase(SwitchCG::BitTestBlock &BB, MachineBasicBlock *NextMBB, BranchProbability BranchProbToNext, Register Reg, SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB)
visitBitTestCase - this function produces one "bit test"
DenseMap< const Constant *, unsigned > ConstantsOut
void populateCallLoweringInfo(TargetLowering::CallLoweringInfo &CLI, const CallBase *Call, unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy, AttributeSet RetAttrs, bool IsPatchPoint)
Populate a CallLoweringInfo (into CLI) based on the properties of the call being lowered.
void salvageUnresolvedDbgValue(const Value *V, DanglingDebugInfo &DDI)
For the given dangling debuginfo record, perform last-ditch efforts to resolve the debuginfo to somet...
SmallVector< SDValue, 8 > PendingLoads
Loads are not emitted to the program immediately.
GCFunctionInfo * GFI
Garbage collection metadata for the function.
SDValue getRoot()
Similar to getMemoryRoot, but also flushes PendingConstrainedFP(Strict) items.
void ExportFromCurrentBlock(const Value *V)
ExportFromCurrentBlock - If this condition isn't known to be exported from the current basic block,...
DebugLoc getCurDebugLoc() const
void resolveOrClearDbgInfo()
Evict any dangling debug information, attempting to salvage it first.
std::pair< SDValue, SDValue > lowerInvokable(TargetLowering::CallLoweringInfo &CLI, const BasicBlock *EHPadBB=nullptr)
SDValue getMemoryRoot()
Return the current virtual root of the Selection DAG, flushing any PendingLoad items.
void resolveDanglingDebugInfo(const Value *V, SDValue Val)
If we saw an earlier dbg_value referring to V, generate the debug data structures now that we've seen...
SDLoc getCurSDLoc() const
void visit(const Instruction &I)
void dropDanglingDebugInfo(const DILocalVariable *Variable, const DIExpression *Expr)
If we have dangling debug info that describes Variable, or an overlapping part of variable considerin...
SDValue getCopyFromRegs(const Value *V, Type *Ty)
If there was virtual register allocated for the value V emit CopyFromReg of the specified type Ty.
void CopyToExportRegsIfNeeded(const Value *V)
CopyToExportRegsIfNeeded - If the given value has virtual registers created for it,...
void handleKillDebugValue(DILocalVariable *Var, DIExpression *Expr, DebugLoc DbgLoc, unsigned Order)
Create a record for a kill location debug intrinsic.
void visitJumpTable(SwitchCG::JumpTable &JT)
visitJumpTable - Emit JumpTable node in the current MBB
void visitJumpTableHeader(SwitchCG::JumpTable &JT, SwitchCG::JumpTableHeader &JTH, MachineBasicBlock *SwitchBB)
visitJumpTableHeader - This function emits necessary code to produce index in the JumpTable from swit...
void LowerCallSiteWithPtrAuthBundle(const CallBase &CB, const BasicBlock *EHPadBB)
static const unsigned LowestSDNodeOrder
Lowest valid SDNodeOrder.
void LowerDeoptimizingReturn()
FunctionLoweringInfo & FuncInfo
Information about the function as a whole.
void setValue(const Value *V, SDValue NewN)
void FindMergedConditions(const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB, MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB, Instruction::BinaryOps Opc, BranchProbability TProb, BranchProbability FProb, bool InvertCond)
const TargetLibraryInfo * LibInfo
bool isExportableFromCurrentBlock(const Value *V, const BasicBlock *FromBB)
void visitSPDescriptorParent(StackProtectorDescriptor &SPD, MachineBasicBlock *ParentBB)
Codegen a new tail for a stack protector check ParentMBB which has had its tail spliced into a stack ...
bool handleDebugValue(ArrayRef< const Value * > Values, DILocalVariable *Var, DIExpression *Expr, DebugLoc DbgLoc, unsigned Order, bool IsVariadic)
For a given list of Values, attempt to create and record a SDDbgValue in the SelectionDAG.
SDValue getControlRoot()
Similar to getRoot, but instead of flushing all the PendingLoad items, flush all the PendingExports (...
void UpdateSplitBlock(MachineBasicBlock *First, MachineBasicBlock *Last)
When an MBB was split during scheduling, update the references that need to refer to the last resulti...
SDValue getValueImpl(const Value *V)
getValueImpl - Helper function for getValue and getNonRegisterValue.
void visitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB)
visitSwitchCase - Emits the necessary code to represent a single node in the binary search tree resul...
void visitSPDescriptorFailure(StackProtectorDescriptor &SPD)
Codegen the failure basic block for a stack protector check.
std::unique_ptr< FunctionLoweringInfo > FuncInfo
SmallPtrSet< const Instruction *, 4 > ElidedArgCopyInstrs
const TargetLowering * TLI
virtual void emitFunctionEntryCode()
SwiftErrorValueTracking * SwiftError
std::unique_ptr< SelectionDAGBuilder > SDB
Targets can subclass this to parameterize the SelectionDAG lowering and instruction selection process...
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrnlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src, SDValue MaxLength, MachinePointerInfo SrcPtrInfo) const
virtual std::pair< SDValue, SDValue > EmitTargetCodeForMemcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, SDValue Op3, MachinePointerInfo Op1PtrInfo, MachinePointerInfo Op2PtrInfo) const
Emit target-specific code that performs a memcmp/bcmp, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrcpy(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Dest, SDValue Src, MachinePointerInfo DestPtrInfo, MachinePointerInfo SrcPtrInfo, bool isStpcpy) const
Emit target-specific code that performs a strcpy or stpcpy, in cases where that is faster than a libc...
virtual std::pair< SDValue, SDValue > EmitTargetCodeForMemchr(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Src, SDValue Char, SDValue Length, MachinePointerInfo SrcPtrInfo) const
Emit target-specific code that performs a memchr, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, MachinePointerInfo Op1PtrInfo, MachinePointerInfo Op2PtrInfo) const
Emit target-specific code that performs a strcmp, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src, MachinePointerInfo SrcPtrInfo) const
virtual SDValue EmitTargetCodeForSetTag(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Addr, SDValue Size, MachinePointerInfo DstPtrInfo, bool ZeroData) const
Help to insert SDNodeFlags automatically in transforming.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT, unsigned Opcode)
Convert Op, which must be of integer type, to the integer type VT, by either any/sign/zero-extending ...
SDValue getLabelNode(unsigned Opcode, const SDLoc &dl, SDValue Root, MCSymbol *Label)
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
SDValue getMaskedGather(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, ISD::LoadExtType ExtTy)
SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS, unsigned DestAS)
Return an AddrSpaceCastSDNode.
const TargetSubtargetInfo & getSubtarget() const
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
BlockFrequencyInfo * getBFI() const
SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL)
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
SDValue getAtomicMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Value, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo)
SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm, bool ConstantFold=true)
Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
SDValue getPseudoProbeNode(const SDLoc &Dl, SDValue Chain, uint64_t Guid, uint64_t Index, uint32_t Attr)
Creates a PseudoProbeSDNode with function GUID Guid and the index of the block Index it is probing,...
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
SDValue getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, MachinePointerInfo DstPtrInfo, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
SDValue getStridedLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &DL, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, bool IsExpanding=false)
SDValue getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDVTList VTs, SDValue Chain, SDValue Ptr, SDValue Cmp, SDValue Swp, MachineMemOperand *MMO)
Gets a node for an atomic cmpxchg op.
SDDbgValue * getVRegDbgValue(DIVariable *Var, DIExpression *Expr, unsigned VReg, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a VReg SDDbgValue node.
void ReplaceAllUsesOfValuesWith(const SDValue *From, const SDValue *To, unsigned Num)
Like ReplaceAllUsesOfValueWith, but for multiple values at once.
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
void addMMRAMetadata(const SDNode *Node, MDNode *MMRA)
Set MMRAMetadata to be associated with Node.
SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
SDValue getRegister(Register Reg, EVT VT)
SDValue getElementCount(const SDLoc &DL, EVT VT, ElementCount EC, bool ConstantFold=true)
SDValue getGetFPEnv(SDValue Chain, const SDLoc &dl, SDValue Ptr, EVT MemVT, MachineMemOperand *MMO)
SDValue getAssertAlign(const SDLoc &DL, SDValue V, Align A)
Return an AssertAlignSDNode.
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
SDValue getStepVector(const SDLoc &DL, EVT ResVT, const APInt &StepVal)
Returns a vector of type ResVT whose elements contain the linear sequence <0, Step,...
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain, SDValue Ptr, SDValue Val, MachineMemOperand *MMO)
Gets a node for an atomic op, produces result (if relevant) and chain and takes 2 operands.
Align getEVTAlign(EVT MemoryVT) const
Compute the default alignment value for the given type.
void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge)
Set NoMergeSiteInfo to be associated with Node if NoMerge is true.
bool shouldOptForSize() const
SDValue getVPZExtOrTrunc(const SDLoc &DL, EVT VT, SDValue Op, SDValue Mask, SDValue EVL)
Convert a vector-predicated Op, which must be an integer vector, to the vector-type VT,...
const TargetLowering & getTargetLoweringInfo() const
static constexpr unsigned MaxRecursionDepth
SDValue getStridedStoreVP(SDValue Chain, const SDLoc &DL, SDValue Val, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
void AddDbgValue(SDDbgValue *DB, bool isParameter)
Add a dbg_value SDNode.
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getGatherVP(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
void DeleteNode(SDNode *N)
Remove the specified node from the system.
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDDbgValue * getDbgValueList(DIVariable *Var, DIExpression *Expr, ArrayRef< SDDbgOperand > Locs, ArrayRef< SDNode * > Dependencies, bool IsIndirect, const DebugLoc &DL, unsigned O, bool IsVariadic)
Creates a SDDbgValue node from a list of locations.
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check...
SDValue getNegative(SDValue Val, const SDLoc &DL, EVT VT)
Create negative operation as (SUB 0, Val).
void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
const DataLayout & getDataLayout() const
ProfileSummaryInfo * getPSI() const
SDValue getTargetFrameIndex(int FI, EVT VT)
SDValue getTokenFactor(const SDLoc &DL, SmallVectorImpl< SDValue > &Vals)
Creates a new TokenFactor containing Vals.
const SelectionDAGTargetInfo & getSelectionDAGInfo() const
SDValue getMaskedHistogram(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
SDValue getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
SDDbgLabel * getDbgLabel(DILabel *Label, const DebugLoc &DL, unsigned O)
Creates a SDDbgLabel node.
SDValue getStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, bool isTargetGA=false, unsigned TargetFlags=0)
SDValue getVAArg(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue SV, unsigned Align)
VAArg produces a result and token chain, and takes a pointer and a source value as input.
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getMDNode(const MDNode *MD)
Return an MDNodeSDNode which holds an MDNode.
void ReplaceAllUsesWith(SDValue From, SDValue To)
Modify anything using 'From' to use 'To' instead.
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getSrcValue(const Value *v)
Construct a node to track a Value* through the backend.
SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op)
SDValue getAtomicMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
MaybeAlign InferPtrAlign(SDValue Ptr) const
Infer alignment of a load / store address.
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
void AddDbgLabel(SDDbgLabel *DB)
Add a dbg_label SDNode.
SDValue getBasicBlock(MachineBasicBlock *MBB)
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
SDValue getPartialReduceAdd(SDLoc DL, EVT ReducedTy, SDValue Op1, SDValue Op2)
Create the DAG equivalent of vector_partial_reduce where Op1 and Op2 are its operands and ReducedTY i...
SDValue getEHLabel(const SDLoc &dl, SDValue Root, MCSymbol *Label)
SDValue getSetFPEnv(SDValue Chain, const SDLoc &dl, SDValue Ptr, EVT MemVT, MachineMemOperand *MMO)
SDValue getMaskedStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Base, SDValue Offset, SDValue Mask, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
SDValue getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either truncating it or perform...
SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
SDValue getBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, bool isTarget=false, unsigned TargetFlags=0)
SDValue getLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT MemVT, Align Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, const MDNode *Ranges=nullptr, bool IsExpanding=false)
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDDbgValue * getConstantDbgValue(DIVariable *Var, DIExpression *Expr, const Value *C, const DebugLoc &DL, unsigned O)
Creates a constant SDDbgValue node.
SDValue getScatterVP(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
SDValue getValueType(EVT)
SDValue getTargetConstantFP(double Val, const SDLoc &DL, EVT VT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...
SDValue getAtomicMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
SDDbgValue * getFrameIndexDbgValue(DIVariable *Var, DIExpression *Expr, unsigned FI, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a FrameIndex SDDbgValue node.
SDValue getJumpTable(int JTI, EVT VT, bool isTarget=false, unsigned TargetFlags=0)
SDValue getVPPtrExtOrTrunc(const SDLoc &DL, EVT VT, SDValue Op, SDValue Mask, SDValue EVL)
Convert a vector-predicated Op, which must be of integer type, to the vector-type integer type VT,...
SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
MachineFunction & getMachineFunction() const
SDValue getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to extend the Op as a pointer value assuming it was the smaller SrcTy ...
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
const FunctionVarLocs * getFunctionVarLocs() const
Returns the result of the AssignmentTrackingAnalysis pass if it's available, otherwise return nullptr...
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
SDValue getCondCode(ISD::CondCode Cond)
SDValue getLifetimeNode(bool IsStart, const SDLoc &dl, SDValue Chain, int FrameIndex, int64_t Size, int64_t Offset=-1)
Creates a LifetimeSDNode that starts (IsStart==true) or ends (IsStart==false) the lifetime of the por...
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
void addPCSections(const SDNode *Node, MDNode *MD)
Set PCSections to be associated with Node.
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue getMCSymbol(MCSymbol *Sym, EVT VT)
SDValue getSetCCVP(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Mask, SDValue EVL)
Helper function to make it easier to build VP_SETCCs if you just have an ISD::CondCode instead of an ...
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
SDDbgValue * getDbgValue(DIVariable *Var, DIExpression *Expr, SDNode *N, unsigned R, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a SDDbgValue node.
SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Base, SDValue Offset, SDValue Mask, SDValue Src0, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, ISD::LoadExtType, bool IsExpanding=false)
SDValue getSplat(EVT VT, const SDLoc &DL, SDValue Op)
Returns a node representing a splat of one value into all lanes of the provided vector type.
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
SDValue getMaskedScatter(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, bool IsTruncating=false)
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void swap(SmallVectorImpl &RHS)
void push_back(const T &Elt)
pointer data()
Return a pointer to the vector's buffer, even if empty().
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Encapsulates all of the information needed to generate a stack protector check, and signals to isel w...
MachineBasicBlock * getSuccessMBB()
MachineBasicBlock * getFailureMBB()
void clear()
Clear the memory usage of this object.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
constexpr bool empty() const
empty - Check if the string is empty.
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
TypeSize getElementOffset(unsigned Idx) const
Class to represent struct types.
void setCurrentVReg(const MachineBasicBlock *MBB, const Value *, Register)
Set the swifterror virtual register in the VRegDefMap for this basic block.
Register getOrCreateVRegUseAt(const Instruction *, const MachineBasicBlock *, const Value *)
Get or create the swifterror value virtual register for a use of a swifterror by an instruction.
Register getOrCreateVRegDefAt(const Instruction *, const MachineBasicBlock *, const Value *)
Get or create the swifterror value virtual register for a def of a swifterror by an instruction.
const Value * getFunctionArg() const
Get the (unique) function argument that was marked swifterror, or nullptr if this function has no swi...
Information about stack frame layout on the target.
virtual TargetStackID::Value getStackIDForScalableVectors() const
Returns the StackID that scalable vectors should be associated with.
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
TargetInstrInfo - Interface to description of machine instruction set.
TargetIntrinsicInfo - Interface to description of target-specific intrinsic functions.
Provides information about what library functions are available for the current target.
bool hasOptimizedCodeGen(LibFunc F) const
Tests if the function is both available and a candidate for optimized code generation.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
void setAttributes(const CallBase *Call, unsigned ArgIdx)
Set CallLoweringInfo attribute flags based on a call instruction and called function attributes.
virtual Align getByValTypeAlignment(Type *Ty, const DataLayout &DL) const
Returns the desired alignment for ByVal or InAlloca aggregate function arguments in the caller parame...
virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT) const
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
EVT getMemValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
LegalizeAction
This enum indicates whether operations are valid for a target, and if not, what action should be used...
virtual bool useStackGuardXorFP() const
If this function returns true, stack protection checks should XOR the frame pointer (or whichever poi...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
virtual bool isLegalScaleForGatherScatter(uint64_t Scale, uint64_t ElemSize) const
virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const
Return true if sign-extension from FromTy to ToTy is cheaper than zero-extension.
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
virtual CondMergingParams getJumpConditionMergingParams(Instruction::BinaryOps, const Value *, const Value *) const
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
virtual Value * getSDagStackGuard(const Module &M) const
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nul...
virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
bool isJumpExpensive() const
Return true if Flow Control is an expensive operation that should be avoided.
unsigned getBitWidthForCttzElements(Type *RetTy, ElementCount EC, bool ZeroIsPoison, const ConstantRange *VScaleRange) const
Return the minimum number of bits required to hold the maximum possible number of trailing zero vecto...
virtual bool shouldExtendGSIndex(EVT VT, EVT &EltTy) const
Returns true if the index type for a masked gather/scatter requires extending.
virtual unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Certain targets such as MIPS require that some types such as vectors are always broken down into scal...
virtual Function * getSSPStackGuardCheck(const Module &M) const
If the target has a standard stack protection check function that performs validation and error handl...
Register getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save...
LegalizeAction getFixedPointOperationAction(unsigned Op, EVT VT, unsigned Scale) const
Some fixed point operations may be natively supported by the target but only for specific scales.
MachineMemOperand::Flags getAtomicMemOperandFlags(const Instruction &AI, const DataLayout &DL) const
virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &, MachineFunction &, unsigned) const
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
Determine if the target supports unaligned memory accesses.
bool isOperationCustom(unsigned Op, EVT VT) const
Return true if the operation uses custom lowering, regardless of whether the type is legal or not.
bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const
When splitting a value of the specified type into parts, does the Lo or Hi part come first?...
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const
Returns the type for the shift amount of a shift opcode.
virtual Align getABIAlignmentForCallingConv(Type *ArgTy, const DataLayout &DL) const
Certain targets have context sensitive alignment requirements, where one type has the alignment requi...
virtual bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const
Return true if the @llvm.get.active.lane.mask intrinsic should be expanded using generic code in Sele...
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
MachineMemOperand::Flags getLoadMemOperandFlags(const LoadInst &LI, const DataLayout &DL, AssumptionCache *AC=nullptr, const TargetLibraryInfo *LibInfo=nullptr) const
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
MVT getProgramPointerTy(const DataLayout &DL) const
Return the type for code pointers, which is determined by the program address space specified through...
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
virtual bool shouldExpandVectorMatch(EVT VT, unsigned SearchSize) const
Return true if the @llvm.experimental.vector.match intrinsic should be expanded for vector type ‘VT’ ...
virtual MVT getFenceOperandTy(const DataLayout &DL) const
Return the type for operands of fence.
virtual bool shouldExpandGetVectorLength(EVT CountVT, unsigned VF, bool IsScalable) const
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
virtual MVT hasFastEqualityCompare(unsigned NumBits) const
Return the preferred operand type if the target has a quick way to compare integer values of the give...
MachineMemOperand::Flags getStoreMemOperandFlags(const StoreInst &SI, const DataLayout &DL) const
virtual bool shouldExpandPartialReductionIntrinsic(const IntrinsicInst *I) const
Return true if the @llvm.experimental.vector.partial.reduce.
virtual bool shouldExpandCttzElements(EVT VT) const
Return true if the @llvm.experimental.cttz.elts intrinsic should be expanded using generic code in Se...
virtual bool signExtendConstant(const ConstantInt *C) const
Return true if this constant should be sign extended when promoting to a larger type.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
virtual Register getExceptionPointerRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception address on entry to an ...
bool supportsUnalignedAtomics() const
Whether the target supports unaligned atomic operations.
std::vector< ArgListEntry > ArgListTy
bool isBeneficialToExpandPowI(int64_t Exponent, bool OptForSize) const
Return true if it is beneficial to expand an @llvm.powi.
MVT getFrameIndexTy(const DataLayout &DL) const
Return the type for frame index, which is determined by the alloca address space specified through th...
virtual Register getExceptionSelectorRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception typeid on entry to a la...
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Vector types are broken down into some number of legal first class types.
virtual MVT getVPExplicitVectorLengthTy() const
Returns the type to be used for the EVL/AVL operand of VP nodes: ISD::VP_ADD, ISD::VP_SUB,...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual bool supportKCFIBundles() const
Return true if the target supports kcfi operand bundles.
virtual bool supportPtrAuthBundles() const
Return true if the target supports ptrauth operand bundles.
virtual bool supportSwiftError() const
Return true if the target supports swifterror attribute.
virtual SDValue visitMaskedLoad(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, MachineMemOperand *MMO, SDValue &NewLoad, SDValue Ptr, SDValue PassThru, SDValue Mask) const
virtual SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val, const SDLoc &DL) const
virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT, ISD::NodeType) const
Return the type that should be used to zero or sign extend a zeroext/signext integer return value.
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const
std::vector< AsmOperandInfo > AsmOperandInfoVector
SDValue expandIS_FPCLASS(EVT ResultVT, SDValue Op, FPClassTest Test, SDNodeFlags Flags, const SDLoc &DL, SelectionDAG &DAG) const
Expand check for floating point class.
virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL, SelectionDAG &DAG) const
This callback is used to prepare for a volatile or atomic load.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, std::optional< CallingConv::ID > CC) const
Target-specific splitting of values into parts that fit a register storing a legal type.
virtual SDValue joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, std::optional< CallingConv::ID > CC) const
Target-specific combining of register parts into its original value.
virtual SDValue LowerCall(CallLoweringInfo &, SmallVectorImpl< SDValue > &) const
This hook must be implemented to lower calls into the specified DAG.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Glue, const SDLoc &DL, const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
virtual SDValue LowerFormalArguments(SDValue, CallingConv::ID, bool, const SmallVectorImpl< ISD::InputArg > &, const SDLoc &, SelectionDAG &, SmallVectorImpl< SDValue > &) const
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL, const TargetRegisterInfo *TRI, const CallBase &Call) const
Split up the constraint string from the inline assembly value into the specific constraints and their...
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const
This callback is invoked for operations that are unsupported by the target, which are registered to u...
virtual SDValue LowerReturn(SDValue, CallingConv::ID, bool, const SmallVectorImpl< ISD::OutputArg > &, const SmallVectorImpl< SDValue > &, const SDLoc &, SelectionDAG &) const
This hook must be implemented to lower outgoing return values, described by the Outs array,...
virtual bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg, const DataLayout &DL) const
For some targets, an LLVM struct type must be broken down into multiple simple types,...
virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo, SDValue Op, SelectionDAG *DAG=nullptr) const
Determines the constraint code and constraint type to use for the specific AsmOperandInfo,...
virtual void CollectTargetIntrinsicOperands(const CallInst &I, SmallVectorImpl< SDValue > &Ops, SelectionDAG &DAG) const
virtual SDValue visitMaskedStore(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, MachineMemOperand *MMO, SDValue Ptr, SDValue Val, SDValue Mask) const
virtual bool useLoadStackGuardNode(const Module &M) const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
virtual void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but leg...
virtual bool isInlineAsmTargetBranch(const SmallVectorImpl< StringRef > &AsmStrs, unsigned OpNo) const
On x86, return true if the operand with index OpNo is a CALL or JUMP instruction, which can use eithe...
virtual MVT getJumpTableRegTy(const DataLayout &DL) const
virtual bool CanLowerReturn(CallingConv::ID, MachineFunction &, bool, const SmallVectorImpl< ISD::OutputArg > &, LLVMContext &, const Type *RetTy) const
This hook should be implemented to check whether the return values described by the Outs array can fi...
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
virtual const TargetIntrinsicInfo * getIntrinsicInfo() const
If intrinsic information is available, return it. If not, return null.
virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const
Returns true if a cast between SrcAS and DestAS is a noop.
const Triple & getTargetTriple() const
virtual TargetTransformInfo getTargetTransformInfo(const Function &F) const
Return a TargetTransformInfo for a given function.
CodeModel::Model getCodeModel() const
Returns the code model.
unsigned NoNaNsFPMath
NoNaNsFPMath - This flag is enabled when the -enable-no-nans-fp-math flag is specified on the command...
unsigned EnableFastISel
EnableFastISel - This flag enables fast-path instruction selection which trades away generated code q...
unsigned NoTrapAfterNoreturn
Do not emit a trap instruction for 'unreachable' IR instructions behind noreturn calls,...
unsigned TrapUnreachable
Emit target-specific trap instruction for 'unreachable' IR instructions.
FPOpFusion::FPOpFusionMode AllowFPOpFusion
AllowFPOpFusion - This flag is set by the -fp-contract=xxx option.
unsigned getID() const
Return the register class ID number.
iterator begin() const
begin/end - Return all of the registers in this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const TargetFrameLowering * getFrameLowering() const
virtual const TargetInstrInfo * getInstrInfo() const
Target - Wrapper for Target specific information.
Triple - Helper class for working with autoconf configuration names.
ArchType getArch() const
Get the parsed architecture type of this triple.
bool isAArch64() const
Tests whether the target is AArch64 (little and big endian).
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isPointerTy() const
True if this is an instance of PointerType.
static IntegerType * getInt1Ty(LLVMContext &C)
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
TypeID
Definitions of all of the base types for the Type system.
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
static Type * getVoidTy(LLVMContext &C)
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isTokenTy() const
Return true if this is 'token'.
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
bool isVoidTy() const
Return true if this is 'void'.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
CmpInst::Predicate getPredicate() const
This is the common base class for vector predication intrinsics.
static std::optional< unsigned > getVectorLengthParamPos(Intrinsic::ID IntrinsicID)
MaybeAlign getPointerAlignment() const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
iterator_range< user_iterator > users()
LLVMContext & getContext() const
All values hold a context through their type.
StringRef getName() const
Return a constant reference to the value's name.
Base class of all SIMD vector types.
Type * getElementType() const
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
const ParentTy * getParent() const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ AnyReg
OBSOLETED - Used for stack based JavaScript calls.
@ AMDGPU_CS_Chain
Used on AMDGPUs to give the middle-end more control over argument placement.
@ X86_VectorCall
MSVC calling convention that passes vectors and vector aggregates in SSE registers.
@ C
The default llvm calling convention, compatible with C.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ DELETED_NODE
DELETED_NODE - This is an illegal value that is used to catch errors.
@ SET_FPENV
Sets the current floating-point environment.
@ VECREDUCE_SEQ_FADD
Generic reduction nodes.
@ EH_SJLJ_LONGJMP
OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.longjmp intrinsic.
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same...
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, val, ptr) This corresponds to "store atomic" instruction.
@ RESET_FPENV
Set floating-point environment to default state.
@ ADD
Simple integer binary arithmetic operators.
@ SMULFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ SET_FPMODE
Sets the current dynamic floating-point control modes.
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ VECTOR_FIND_LAST_ACTIVE
@ FATAN2
FATAN2 - atan2, inspired by libm.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ EH_SJLJ_SETUP_DISPATCH
OUTCHAIN = EH_SJLJ_SETUP_DISPATCH(INCHAIN) The target initializes the dispatch table here.
@ ATOMIC_CMP_SWAP_WITH_SUCCESS
Val, Success, OUTCHAIN = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap) N.b.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ VECREDUCE_FMAX
FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
@ FADD
Simple binary floating point operators.
@ VECREDUCE_FMAXIMUM
FMINIMUM/FMAXIMUM nodes propagate NaNs and signed zeroes using the llvm.minimum and llvm....
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
@ RESET_FPMODE
Sets default dynamic floating-point control modes.
@ FPTRUNC_ROUND
FPTRUNC_ROUND - This corresponds to the fptrunc_round intrinsic.
@ FAKE_USE
FAKE_USE represents a use of the operand but does not do anything.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ INIT_TRAMPOLINE
INIT_TRAMPOLINE - This corresponds to the init_trampoline intrinsic.
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
@ SDIVFIX
RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on 2 integers with the same width...
@ EH_LABEL
EH_LABEL - Represents a label in mid basic block used to track locations needed for debug and excepti...
@ EH_RETURN
OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents 'eh_return' gcc dwarf builtin,...
@ ANNOTATION_LABEL
ANNOTATION_LABEL - Represents a mid basic block label used by annotations.
@ SET_ROUNDING
Set rounding mode.
@ SIGN_EXTEND
Conversion operators.
@ PREALLOCATED_SETUP
PREALLOCATED_SETUP - This has 2 operands: an input chain and a SRCVALUE with the preallocated call Va...
@ READSTEADYCOUNTER
READSTEADYCOUNTER - This corresponds to the readfixedcounter intrinsic.
@ ADDROFRETURNADDR
ADDROFRETURNADDR - Represents the llvm.addressofreturnaddress intrinsic.
@ BR
Control flow instructions. These all have token chains.
@ VECREDUCE_FADD
These reductions have relaxed evaluation order semantics, and have a single vector operand.
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ SSUBO
Same for subtraction.
@ PREALLOCATED_ARG
PREALLOCATED_ARG - This has 3 operands: an input chain, a SRCVALUE with the preallocated call Value,...
@ BRIND
BRIND - Indirect branch.
@ BR_JT
BR_JT - Jumptable branch.
@ VECTOR_INTERLEAVE
VECTOR_INTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and output vectors having the same...
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ ARITH_FENCE
ARITH_FENCE - This corresponds to an arithmetic fence intrinsic.
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ GET_ROUNDING
Returns current rounding mode: -1 Undefined 0 Round to 0 1 Round to nearest, ties to even 2 Round to ...
@ CLEANUPRET
CLEANUPRET - Represents a return from a cleanup block funclet.
@ GET_FPMODE
Reads the current dynamic floating-point control modes.
@ GET_FPENV
Gets the current floating-point environment.
@ SHL
Shift and rotation operations.
@ PtrAuthGlobalAddress
A ptrauth constant.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ EntryToken
EntryToken - This is the marker used to indicate the start of a region.
@ READ_REGISTER
READ_REGISTER, WRITE_REGISTER - This node represents llvm.register on the DAG, which implements the n...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ DEBUGTRAP
DEBUGTRAP - Trap intended to get the attention of a debugger.
@ VSCALE
VSCALE(IMM) - Returns the runtime scaling factor used to calculate the number of elements within a sc...
@ LOCAL_RECOVER
LOCAL_RECOVER - Represents the llvm.localrecover intrinsic.
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ UBSANTRAP
UBSANTRAP - Trap with an immediate describing the kind of sanitizer failure.
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ SMULO
Same for multiplication.
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ VECTOR_REVERSE
VECTOR_REVERSE(VECTOR) - Returns a vector, of the same type as VECTOR, whose elements are shuffled us...
@ SDIVFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ PCMARKER
PCMARKER - This corresponds to the pcmarker intrinsic.
@ INLINEASM_BR
INLINEASM_BR - Branching version of inline asm. Used by asm-goto.
@ EH_DWARF_CFA
EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical Frame Address (CFA),...
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ SCMP
[US]CMP - 3-way comparison of signed or unsigned integers.
@ STRICT_FADD
Constrained versions of the binary floating point operators.
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ VECTOR_SPLICE
VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as VEC1/VEC2 from CONCAT_VECTOR...
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
@ FFREXP
FFREXP - frexp, extract fractional and exponent component of a floating-point value.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ VECTOR_COMPRESS
VECTOR_COMPRESS(Vec, Mask, Passthru) consecutively place vector elements based on mask e....
@ SPONENTRY
SPONENTRY - Represents the llvm.sponentry intrinsic.
@ INLINEASM
INLINEASM - Represents an inline asm block.
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ EH_SJLJ_SETJMP
RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer) This corresponds to the eh.sjlj....
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ BRCOND
BRCOND - Conditional branch.
@ CATCHRET
CATCHRET - Represents a return from a catch block funclet.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ VECTOR_DEINTERLEAVE
VECTOR_DEINTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and output vectors having the sa...
@ GET_DYNAMIC_AREA_OFFSET
GET_DYNAMIC_AREA_OFFSET - get offset from native SP to the address of the most recent dynamic alloca.
@ FMINIMUMNUM
FMINIMUMNUM/FMAXIMUMNUM - minimumnum/maximumnum that is same with FMINNUM_IEEE and FMAXNUM_IEEE besid...
@ ADJUST_TRAMPOLINE
ADJUST_TRAMPOLINE - This corresponds to the adjust_trampoline intrinsic.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
MemIndexType
MemIndexType enum - This enum defines how to interpret MGATHER/SCATTER's index parameter when calcula...
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
Flag
These should be considered private to the implementation of the MCInstrDesc class.
bool match(Val *V, const Pattern &P)
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)
Matches ExtractElementInst.
OneUse_match< T > m_OneUse(const T &SubPattern)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
VScaleVal_match m_VScale()
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
std::vector< CaseCluster > CaseClusterVector
void sortAndRangeify(CaseClusterVector &Clusters)
Sort Clusters and merge adjacent cases.
CaseClusterVector::iterator CaseClusterIt
std::pair< JumpTableHeader, JumpTable > JumpTableBlock
@ CC_Range
A cluster of adjacent case labels with the same destination, or just one case.
@ CC_JumpTable
A cluster of cases suitable for jump table lowering.
@ CC_BitTests
A cluster of cases suitable for bit test lowering.
Reg
All possible values of the reg field in the ModR/M byte.
initializer< Ty > init(const Ty &Val)
LocationClass< Ty > location(Ty &L)
@ DW_OP_LLVM_arg
Only used in LLVM metadata.
ExceptionBehavior
Exception behavior used for floating point operations.
@ ebStrict
This corresponds to "fpexcept.strict".
@ ebMayTrap
This corresponds to "fpexcept.maytrap".
@ ebIgnore
This corresponds to "fpexcept.ignore".
NodeAddr< FuncNode * > Func
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred)
getICmpCondCode - Return the ISD condition code corresponding to the given LLVM IR integer condition ...
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
int popcount(T Value) noexcept
Count the number of set bits in a value.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags,...
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI)
SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
void diagnoseDontCall(const CallInst &CI)
auto successors(const MachineBasicBlock *BB)
bool isIntOrFPConstant(SDValue V)
Return true if V is either a integer or FP constant.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Value * GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, const DataLayout &DL, bool AllowNonInbounds=true)
Analyze the specified pointer to see if it can be expressed as a base pointer plus a constant offset.
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer less than or equal to Value and is Skew mod Align.
gep_type_iterator gep_type_end(const User *GEP)
ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
llvm::SmallVector< int, 16 > createStrideMask(unsigned Start, unsigned Stride, unsigned VF)
Create a stride shuffle mask.
@ SPF_ABS
Absolute value.
@ SPF_NABS
Negated absolute value.
@ SPF_FMAXNUM
Floating point maxnum.
@ SPF_UMIN
Unsigned minimum.
@ SPF_UMAX
Unsigned maximum.
@ SPF_SMAX
Signed maximum.
@ SPF_FMINNUM
Floating point minnum.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
detail::zippy< detail::zip_first, T, U, Args... > zip_first(T &&t, U &&u, Args &&...args)
zip iterator that, for the sake of efficiency, assumes the first iteratee to be the shortest.
void sort(IteratorTy Start, IteratorTy End)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out param...
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
auto succ_size(const MachineBasicBlock *BB)
bool hasSingleElement(ContainerTy &&C)
Returns true if the given container only contains a single element.
ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred)
getFCmpCondCode - Return the ISD condition code corresponding to the given LLVM IR floating-point con...
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
Value * salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Ops, SmallVectorImpl< Value * > &AdditionalValues)
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
@ Global
Append to llvm.global_dtors.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
bool isFuncletEHPersonality(EHPersonality Pers)
Returns true if this is a personality function that invokes handler funclets (which must return to it...
bool isAssignmentTrackingEnabled(const Module &M)
Return true if assignment tracking is enabled for module M.
llvm::SmallVector< int, 16 > createInterleaveMask(unsigned VF, unsigned NumVecs)
Create an interleave shuffle mask.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Or
Bitwise or logical OR of integers.
@ Mul
Product of integers.
@ And
Bitwise or logical AND of integers.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
@ SPNB_RETURNS_NAN
Given one NaN input, returns the NaN.
@ SPNB_RETURNS_OTHER
Given one NaN input, returns the non-NaN.
@ SPNB_RETURNS_ANY
Given one NaN input, can return either.
bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM, bool ReturnsFirstArg=false)
Test if the given instruction is in a position to be optimized with a tail-call.
DWARFExpression::Operation Op
ISD::CondCode getFCmpCodeWithoutNaN(ISD::CondCode CC)
getFCmpCodeWithoutNaN - Given an ISD condition code comparing floats, return the equivalent code if w...
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< EVT > *MemVTs, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
gep_type_iterator gep_type_begin(const User *GEP)
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
GlobalValue * ExtractTypeInfo(Value *V)
ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, const LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list is empty.
Constant * ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, APInt Offset, const DataLayout &DL)
Return the value that a load from C with offset Offset would produce if it is constant and determinab...
unsigned ComputeLinearIndex(Type *Ty, const unsigned *Indices, const unsigned *IndicesEnd, unsigned CurIndex=0)
Compute the linearized index of a member in a nested aggregate/struct/array.
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
@ Default
The result values are uniform if and only if all operands are uniform.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
static const fltSemantics & IEEEsingle() LLVM_READNONE
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
uint64_t getScalarStoreSize() const
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
static EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
bool isRISCVVectorTuple() const
Return true if this is a RISC-V vector tuple value type.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isFixedLengthVector() const
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool bitsGE(EVT VT) const
Return true if this has no less bits than VT.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
EVT changeVectorElementType(EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
bool isInteger() const
Return true if this is an integer or a vector integer type.
void setPointerAddrSpace(unsigned AS)
void setOrigAlign(Align A)
OutputArg - This struct carries flags and a value for a single outgoing (actual) argument or outgoing...
ConstraintPrefix Type
Type - The basic type of the constraint: input/output/clobber/label.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
This class contains a discriminated union of information about pointers in memory operands,...
static MachinePointerInfo getUnknownStack(MachineFunction &MF)
Stack memory without other information.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
This struct represents the registers (physical or virtual) that a particular set of values is assigne...
SmallVector< std::pair< Register, TypeSize >, 4 > getRegsAndSizes() const
Return a list of registers and their sizes.
SmallVector< unsigned, 4 > RegCount
This list holds the number of registers for each value.
bool isABIMangled() const
SmallVector< EVT, 4 > ValueVTs
The value types of the values, which may not be legal, and may need be promoted or synthesized from o...
SmallVector< Register, 4 > Regs
This list holds the registers assigned to the values.
void AddInlineAsmOperands(InlineAsm::Kind Code, bool HasMatching, unsigned MatchingIdx, const SDLoc &dl, SelectionDAG &DAG, std::vector< SDValue > &Ops) const
Add this value to the specified inlineasm node operand list.
SDValue getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo, const SDLoc &dl, SDValue &Chain, SDValue *Glue, const Value *V=nullptr) const
Emit a series of CopyFromReg nodes that copies from this value and returns the result as a ValueVTs v...
SmallVector< MVT, 4 > RegVTs
The value types of the registers.
void getCopyToRegs(SDValue Val, SelectionDAG &DAG, const SDLoc &dl, SDValue &Chain, SDValue *Glue, const Value *V=nullptr, ISD::NodeType PreferredExtendType=ISD::ANY_EXTEND) const
Emit a series of CopyToReg nodes that copies the specified value into the registers specified by this...
std::optional< CallingConv::ID > CallConv
Records if this value needs to be treated in an ABI dependant manner, different to normal type legali...
bool occupiesMultipleRegs() const
Check if the total RegCount is greater than one.
These are IR-level optimization flags that may be propagated to SDNodes.
void copyFMF(const FPMathOperator &FPMO)
Propagate the fast-math-flags from an IR FPMathOperator.
bool hasAllowReassociation() const
void setNoUnsignedWrap(bool b)
void setNoSignedWrap(bool b)
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
A MapVector that performs no allocations if smaller than a certain size.
MachineBasicBlock * Default
BranchProbability DefaultProb
MachineBasicBlock * Parent
bool FallthroughUnreachable
MachineBasicBlock * ThisBB
This structure is used to communicate between SelectionDAGBuilder and SDISel for the code generation of additional basic blocks needed by multi-case switch statements.
BranchProbability TrueProb
BranchProbability FalseProb
MachineBasicBlock * TrueBB
MachineBasicBlock * FalseBB
SDLoc DL
The debug location of the instruction this CaseBlock was produced from.
A cluster of case labels.
static CaseCluster range(const ConstantInt *Low, const ConstantInt *High, MachineBasicBlock *MBB, BranchProbability Prob)
This contains information for each constraint that we are lowering.
TargetLowering::ConstraintType ConstraintType
Information about the constraint code, e.g.
This structure contains all information that is necessary for lowering calls.
CallLoweringInfo & setConvergent(bool Value=true)
CallLoweringInfo & setCFIType(const ConstantInt *Type)
SmallVector< ISD::InputArg, 32 > Ins
bool IsPostTypeLegalization
SmallVector< SDValue, 4 > InVals
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setIsPatchPoint(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setTailCall(bool Value=true)
CallLoweringInfo & setIsPreallocated(bool Value=true)
CallLoweringInfo & setConvergenceControlToken(SDValue Token)
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
CallLoweringInfo & setChain(SDValue InChain)
CallLoweringInfo & setPtrAuth(PtrAuthInfo Value)
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList, AttributeSet ResultAttrs={})
This structure is used to pass arguments to makeLibCall function.
MakeLibCallOptions & setDiscardResult(bool Value=true)
This structure contains the information necessary for lowering pointer-authenticating indirect calls.
void addIPToStateRange(const InvokeInst *II, MCSymbol *InvokeBegin, MCSymbol *InvokeEnd)