MachineInstr.cpp
1//===- lib/CodeGen/MachineInstr.cpp ---------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Methods common to all machine instructions.
10//
11//===----------------------------------------------------------------------===//
12
14#include "llvm/ADT/ArrayRef.h"
15#include "llvm/ADT/Hashing.h"
16#include "llvm/ADT/STLExtras.h"
38#include "llvm/IR/Constants.h"
40#include "llvm/IR/DebugLoc.h"
41#include "llvm/IR/Function.h"
42#include "llvm/IR/InlineAsm.h"
44#include "llvm/IR/LLVMContext.h"
45#include "llvm/IR/Metadata.h"
46#include "llvm/IR/Module.h"
48#include "llvm/IR/Operator.h"
49#include "llvm/MC/MCInstrDesc.h"
53#include "llvm/Support/Debug.h"
58#include <algorithm>
59#include <cassert>
60#include <cstdint>
61#include <cstring>
62#include <utility>
63
64using namespace llvm;
65
66static const MachineFunction *getMFIfAvailable(const MachineInstr &MI) {
67  if (const MachineBasicBlock *MBB = MI.getParent())
68 if (const MachineFunction *MF = MBB->getParent())
69 return MF;
70 return nullptr;
71}
72
73// Try to crawl up to the machine function and get TRI and IntrinsicInfo from
74// it.
75static void tryToGetTargetInfo(const MachineInstr &MI,
76                               const TargetRegisterInfo *&TRI,
77                               const MachineRegisterInfo *&MRI,
78                               const TargetIntrinsicInfo *&IntrinsicInfo,
79 const TargetInstrInfo *&TII) {
80
81 if (const MachineFunction *MF = getMFIfAvailable(MI)) {
82 TRI = MF->getSubtarget().getRegisterInfo();
83 MRI = &MF->getRegInfo();
84 IntrinsicInfo = MF->getTarget().getIntrinsicInfo();
85 TII = MF->getSubtarget().getInstrInfo();
86 }
87}
88
89void MachineInstr::addImplicitDefUseOperands(MachineFunction &MF) {
90  for (MCPhysReg ImpDef : MCID->implicit_defs())
91 addOperand(MF, MachineOperand::CreateReg(ImpDef, true, true));
92 for (MCPhysReg ImpUse : MCID->implicit_uses())
93 addOperand(MF, MachineOperand::CreateReg(ImpUse, false, true));
94}
95
96/// MachineInstr ctor - This constructor creates a MachineInstr and adds the
97/// implicit operands. It reserves space for the number of operands specified by
98/// the MCInstrDesc.
99MachineInstr::MachineInstr(MachineFunction &MF, const MCInstrDesc &TID,
100 DebugLoc DL, bool NoImp)
101 : MCID(&TID), NumOperands(0), Flags(0), AsmPrinterFlags(0),
102 DbgLoc(std::move(DL)), DebugInstrNum(0), Opcode(TID.Opcode) {
103 assert(DbgLoc.hasTrivialDestructor() && "Expected trivial destructor");
104
105 // Reserve space for the expected number of operands.
106 if (unsigned NumOps = MCID->getNumOperands() + MCID->implicit_defs().size() +
107 MCID->implicit_uses().size()) {
108 CapOperands = OperandCapacity::get(NumOps);
109 Operands = MF.allocateOperandArray(CapOperands);
110 }
111
112  if (!NoImp)
113    addImplicitDefUseOperands(MF);
114}
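// Editorial note (not part of the original source): this constructor is not
// usually called directly; most code reaches it through
// MachineFunction::CreateMachineInstr or the BuildMI() helpers, e.g. a sketch
// with placeholder registers:
//   BuildMI(MBB, InsertPt, DL, TII->get(TargetOpcode::COPY), DstReg)
//       .addReg(SrcReg);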
115
116/// MachineInstr ctor - Copies MachineInstr arg exactly.
117/// Does not copy the number from debug instruction numbering, to preserve
118/// uniqueness.
119MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI)
120 : MCID(&MI.getDesc()), NumOperands(0), Flags(0), AsmPrinterFlags(0),
121 Info(MI.Info), DbgLoc(MI.getDebugLoc()), DebugInstrNum(0),
122 Opcode(MI.getOpcode()) {
123 assert(DbgLoc.hasTrivialDestructor() && "Expected trivial destructor");
124
125 CapOperands = OperandCapacity::get(MI.getNumOperands());
126 Operands = MF.allocateOperandArray(CapOperands);
127
128 // Copy operands.
129 for (const MachineOperand &MO : MI.operands())
130 addOperand(MF, MO);
131
132 // Replicate ties between the operands, which addOperand was not
133 // able to do reliably.
134 for (unsigned i = 0, e = getNumOperands(); i < e; ++i) {
135 MachineOperand &NewMO = getOperand(i);
136 const MachineOperand &OrigMO = MI.getOperand(i);
137 NewMO.TiedTo = OrigMO.TiedTo;
138 }
139
140 // Copy all the sensible flags.
141 setFlags(MI.Flags);
142}
143
144void MachineInstr::setDesc(const MCInstrDesc &TID) {
145  if (getParent())
146 getMF()->handleChangeDesc(*this, TID);
147 MCID = &TID;
148 Opcode = TID.Opcode;
149}
150
151void MachineInstr::moveBefore(MachineInstr *MovePos) {
152  MovePos->getParent()->splice(MovePos, getParent(), getIterator());
153}
154
155/// getRegInfo - If this instruction is embedded into a MachineFunction,
156/// return the MachineRegisterInfo object for the current function, otherwise
157/// return null.
158MachineRegisterInfo *MachineInstr::getRegInfo() {
159  if (MachineBasicBlock *MBB = getParent())
160    return &MBB->getParent()->getRegInfo();
161 return nullptr;
162}
163
164const MachineRegisterInfo *MachineInstr::getRegInfo() const {
165 if (const MachineBasicBlock *MBB = getParent())
166 return &MBB->getParent()->getRegInfo();
167 return nullptr;
168}
169
170void MachineInstr::removeRegOperandsFromUseLists(MachineRegisterInfo &MRI) {
171 for (MachineOperand &MO : operands())
172 if (MO.isReg())
173 MRI.removeRegOperandFromUseList(&MO);
174}
175
176void MachineInstr::addRegOperandsToUseLists(MachineRegisterInfo &MRI) {
177 for (MachineOperand &MO : operands())
178 if (MO.isReg())
179 MRI.addRegOperandToUseList(&MO);
180}
181
182void MachineInstr::addOperand(const MachineOperand &Op) {
183  MachineBasicBlock *MBB = getParent();
184  assert(MBB && "Use MachineInstrBuilder to add operands to dangling instrs");
185  MachineFunction *MF = MBB->getParent();
186  assert(MF && "Use MachineInstrBuilder to add operands to dangling instrs");
187  addOperand(*MF, Op);
188}
189
190/// Move NumOps MachineOperands from Src to Dst, with support for overlapping
191/// ranges. If MRI is non-null also update use-def chains.
192static void moveOperands(MachineOperand *Dst, MachineOperand *Src,
193                         unsigned NumOps, MachineRegisterInfo *MRI) {
194 if (MRI)
195 return MRI->moveOperands(Dst, Src, NumOps);
196 // MachineOperand is a trivially copyable type so we can just use memmove.
197 assert(Dst && Src && "Unknown operands");
198 std::memmove(Dst, Src, NumOps * sizeof(MachineOperand));
199}
200
201/// addOperand - Add the specified operand to the instruction. If it is an
202/// implicit operand, it is added to the end of the operand list. If it is
203/// an explicit operand it is added at the end of the explicit operand list
204/// (before the first implicit operand).
205void MachineInstr::addOperand(MachineFunction &MF, const MachineOperand &Op) {
206  assert(isUInt<LLVM_MI_NUMOPERANDS_BITS>(NumOperands + 1) &&
207 "Cannot add more operands.");
208 assert(MCID && "Cannot add operands before providing an instr descriptor");
209
210 // Check if we're adding one of our existing operands.
211 if (&Op >= Operands && &Op < Operands + NumOperands) {
212 // This is unusual: MI->addOperand(MI->getOperand(i)).
213 // If adding Op requires reallocating or moving existing operands around,
214 // the Op reference could go stale. Support it by copying Op.
215 MachineOperand CopyOp(Op);
216 return addOperand(MF, CopyOp);
217 }
218
219 // Find the insert location for the new operand. Implicit registers go at
220 // the end, everything else goes before the implicit regs.
221 //
222 // FIXME: Allow mixed explicit and implicit operands on inline asm.
223 // InstrEmitter::EmitSpecialNode() is marking inline asm clobbers as
224 // implicit-defs, but they must not be moved around. See the FIXME in
225 // InstrEmitter.cpp.
226 unsigned OpNo = getNumOperands();
227 bool isImpReg = Op.isReg() && Op.isImplicit();
228 if (!isImpReg && !isInlineAsm()) {
229 while (OpNo && Operands[OpNo-1].isReg() && Operands[OpNo-1].isImplicit()) {
230 --OpNo;
231 assert(!Operands[OpNo].isTied() && "Cannot move tied operands");
232 }
233 }
234
235  // OpNo now points at the desired insertion point. Unless this is a variadic
236 // instruction, only implicit regs are allowed beyond MCID->getNumOperands().
237 // RegMask operands go between the explicit and implicit operands.
238 MachineRegisterInfo *MRI = getRegInfo();
239
240 // Determine if the Operands array needs to be reallocated.
241 // Save the old capacity and operand array.
242 OperandCapacity OldCap = CapOperands;
243 MachineOperand *OldOperands = Operands;
244 if (!OldOperands || OldCap.getSize() == getNumOperands()) {
245 CapOperands = OldOperands ? OldCap.getNext() : OldCap.get(1);
246 Operands = MF.allocateOperandArray(CapOperands);
247 // Move the operands before the insertion point.
248 if (OpNo)
249 moveOperands(Operands, OldOperands, OpNo, MRI);
250 }
251
252 // Move the operands following the insertion point.
253 if (OpNo != NumOperands)
254 moveOperands(Operands + OpNo + 1, OldOperands + OpNo, NumOperands - OpNo,
255 MRI);
256 ++NumOperands;
257
258 // Deallocate the old operand array.
259 if (OldOperands != Operands && OldOperands)
260 MF.deallocateOperandArray(OldCap, OldOperands);
261
262 // Copy Op into place. It still needs to be inserted into the MRI use lists.
263 MachineOperand *NewMO = new (Operands + OpNo) MachineOperand(Op);
264 NewMO->ParentMI = this;
265
266 // When adding a register operand, tell MRI about it.
267 if (NewMO->isReg()) {
268 // Ensure isOnRegUseList() returns false, regardless of Op's status.
269 NewMO->Contents.Reg.Prev = nullptr;
270 // Ignore existing ties. This is not a property that can be copied.
271 NewMO->TiedTo = 0;
272 // Add the new operand to MRI, but only for instructions in an MBB.
273 if (MRI)
274 MRI->addRegOperandToUseList(NewMO);
275 // The MCID operand information isn't accurate until we start adding
276 // explicit operands. The implicit operands are added first, then the
277 // explicits are inserted before them.
278 if (!isImpReg) {
279 // Tie uses to defs as indicated in MCInstrDesc.
280 if (NewMO->isUse()) {
281 int DefIdx = MCID->getOperandConstraint(OpNo, MCOI::TIED_TO);
282 if (DefIdx != -1)
283 tieOperands(DefIdx, OpNo);
284 }
285 // If the register operand is flagged as early, mark the operand as such.
286 if (MCID->getOperandConstraint(OpNo, MCOI::EARLY_CLOBBER) != -1)
287 NewMO->setIsEarlyClobber(true);
288 }
289 // Ensure debug instructions set debug flag on register uses.
290 if (NewMO->isUse() && isDebugInstr())
291 NewMO->setIsDebug();
292 }
293}
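// Editorial note (not part of the original source): with the insertion policy
// above, explicit operands always stay in front of implicit registers even if
// they are added later. A sketch with placeholder values:
//   MI.addOperand(MF, MachineOperand::CreateReg(ImpReg, /*isDef=*/false,
//                                               /*isImp=*/true)); // appended
//   MI.addOperand(MF, MachineOperand::CreateImm(0)); // lands before ImpReg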
294
295void MachineInstr::removeOperand(unsigned OpNo) {
296 assert(OpNo < getNumOperands() && "Invalid operand number");
297 untieRegOperand(OpNo);
298
299#ifndef NDEBUG
300 // Moving tied operands would break the ties.
301 for (unsigned i = OpNo + 1, e = getNumOperands(); i != e; ++i)
302 if (Operands[i].isReg())
303 assert(!Operands[i].isTied() && "Cannot move tied operands");
304#endif
305
306 MachineRegisterInfo *MRI = getRegInfo();
307 if (MRI && Operands[OpNo].isReg())
308 MRI->removeRegOperandFromUseList(Operands + OpNo);
309
310 // Don't call the MachineOperand destructor. A lot of this code depends on
311 // MachineOperand having a trivial destructor anyway, and adding a call here
312 // wouldn't make it 'destructor-correct'.
313
314 if (unsigned N = NumOperands - 1 - OpNo)
315 moveOperands(Operands + OpNo, Operands + OpNo + 1, N, MRI);
316 --NumOperands;
317}
318
319void MachineInstr::setExtraInfo(MachineFunction &MF,
320                                ArrayRef<MachineMemOperand *> MMOs,
321                                MCSymbol *PreInstrSymbol,
322 MCSymbol *PostInstrSymbol,
323 MDNode *HeapAllocMarker, MDNode *PCSections,
324 uint32_t CFIType, MDNode *MMRAs) {
325 bool HasPreInstrSymbol = PreInstrSymbol != nullptr;
326 bool HasPostInstrSymbol = PostInstrSymbol != nullptr;
327 bool HasHeapAllocMarker = HeapAllocMarker != nullptr;
328 bool HasPCSections = PCSections != nullptr;
329 bool HasCFIType = CFIType != 0;
330 bool HasMMRAs = MMRAs != nullptr;
331 int NumPointers = MMOs.size() + HasPreInstrSymbol + HasPostInstrSymbol +
332 HasHeapAllocMarker + HasPCSections + HasCFIType + HasMMRAs;
333
334 // Drop all extra info if there is none.
335 if (NumPointers <= 0) {
336 Info.clear();
337 return;
338 }
339
340 // If more than one pointer, then store out of line. Store heap alloc markers
341 // out of line because PointerSumType cannot hold more than 4 tag types with
342 // 32-bit pointers.
343 // FIXME: Maybe we should make the symbols in the extra info mutable?
344 else if (NumPointers > 1 || HasMMRAs || HasHeapAllocMarker || HasPCSections ||
345 HasCFIType) {
346 Info.set<EIIK_OutOfLine>(
347 MF.createMIExtraInfo(MMOs, PreInstrSymbol, PostInstrSymbol,
348 HeapAllocMarker, PCSections, CFIType, MMRAs));
349 return;
350 }
351
352 // Otherwise store the single pointer inline.
353 if (HasPreInstrSymbol)
354 Info.set<EIIK_PreInstrSymbol>(PreInstrSymbol);
355 else if (HasPostInstrSymbol)
356 Info.set<EIIK_PostInstrSymbol>(PostInstrSymbol);
357 else
358 Info.set<EIIK_MMO>(MMOs[0]);
359}
360
361void MachineInstr::dropMemRefs(MachineFunction &MF) {
362  if (memoperands_empty())
363    return;
364
365  setExtraInfo(MF, {}, getPreInstrSymbol(), getPostInstrSymbol(),
366               getHeapAllocMarker(), getPCSections(), getCFIType(),
367               getMMRAMetadata());
368}
369
370void MachineInstr::setMemRefs(MachineFunction &MF,
371                              ArrayRef<MachineMemOperand *> MMOs) {
372  if (MMOs.empty()) {
373 dropMemRefs(MF);
374 return;
375 }
376
377  setExtraInfo(MF, MMOs, getPreInstrSymbol(), getPostInstrSymbol(),
378               getHeapAllocMarker(), getPCSections(), getCFIType(),
379               getMMRAMetadata());
380}
381
382void MachineInstr::addMemOperand(MachineFunction &MF,
383                                 MachineMemOperand *MO) {
384  SmallVector<MachineMemOperand *, 2> MMOs;
385  MMOs.append(memoperands_begin(), memoperands_end());
386  MMOs.push_back(MO);
387 setMemRefs(MF, MMOs);
388}
389
390void MachineInstr::cloneMemRefs(MachineFunction &MF, const MachineInstr &MI) {
391  if (this == &MI)
392 // Nothing to do for a self-clone!
393 return;
394
395  assert(&MF == MI.getMF() &&
396         "Invalid machine functions when cloning memory references!");
397 // See if we can just steal the extra info already allocated for the
398 // instruction. We can do this whenever the pre- and post-instruction symbols
399 // are the same (including null).
400 if (getPreInstrSymbol() == MI.getPreInstrSymbol() &&
401 getPostInstrSymbol() == MI.getPostInstrSymbol() &&
402 getHeapAllocMarker() == MI.getHeapAllocMarker() &&
403 getPCSections() == MI.getPCSections() && getMMRAMetadata() &&
404 MI.getMMRAMetadata()) {
405 Info = MI.Info;
406 return;
407 }
408
409 // Otherwise, fall back on a copy-based clone.
410 setMemRefs(MF, MI.memoperands());
411}
412
413/// Check to see if the MMOs pointed to by the two MemRefs arrays are
414/// identical.
415static bool hasIdenticalMMOs(ArrayRef<MachineMemOperand *> LHS,
416                             ArrayRef<MachineMemOperand *> RHS) {
417  if (LHS.size() != RHS.size())
418 return false;
419
420 auto LHSPointees = make_pointee_range(LHS);
421 auto RHSPointees = make_pointee_range(RHS);
422 return std::equal(LHSPointees.begin(), LHSPointees.end(),
423 RHSPointees.begin());
424}
425
426void MachineInstr::cloneMergedMemRefs(MachineFunction &MF,
427                                      ArrayRef<const MachineInstr *> MIs) {
428  // Try handling easy numbers of MIs with simpler mechanisms.
429 if (MIs.empty()) {
430 dropMemRefs(MF);
431 return;
432 }
433 if (MIs.size() == 1) {
434 cloneMemRefs(MF, *MIs[0]);
435 return;
436 }
437 // Because an empty memoperands list provides *no* information and must be
438 // handled conservatively (assuming the instruction can do anything), the only
439 // way to merge with it is to drop all other memoperands.
440 if (MIs[0]->memoperands_empty()) {
441 dropMemRefs(MF);
442 return;
443 }
444
445 // Handle the general case.
446  SmallVector<MachineMemOperand *, 2> MergedMMOs;
447  // Start with the first instruction.
448 assert(&MF == MIs[0]->getMF() &&
449 "Invalid machine functions when cloning memory references!");
450 MergedMMOs.append(MIs[0]->memoperands_begin(), MIs[0]->memoperands_end());
451 // Now walk all the other instructions and accumulate any different MMOs.
452 for (const MachineInstr &MI : make_pointee_range(MIs.slice(1))) {
453 assert(&MF == MI.getMF() &&
454 "Invalid machine functions when cloning memory references!");
455
456 // Skip MIs with identical operands to the first. This is a somewhat
457 // arbitrary hack but will catch common cases without being quadratic.
458 // TODO: We could fully implement merge semantics here if needed.
459 if (hasIdenticalMMOs(MIs[0]->memoperands(), MI.memoperands()))
460 continue;
461
462 // Because an empty memoperands list provides *no* information and must be
463 // handled conservatively (assuming the instruction can do anything), the
464 // only way to merge with it is to drop all other memoperands.
465 if (MI.memoperands_empty()) {
466 dropMemRefs(MF);
467 return;
468 }
469
470 // Otherwise accumulate these into our temporary buffer of the merged state.
471 MergedMMOs.append(MI.memoperands_begin(), MI.memoperands_end());
472 }
473
474 setMemRefs(MF, MergedMMOs);
475}
476
477void MachineInstr::setPreInstrSymbol(MachineFunction &MF, MCSymbol *Symbol) {
478  // Do nothing if old and new symbols are the same.
479 if (Symbol == getPreInstrSymbol())
480 return;
481
482 // If there was only one symbol and we're removing it, just clear info.
483 if (!Symbol && Info.is<EIIK_PreInstrSymbol>()) {
484 Info.clear();
485 return;
486 }
487
488  setExtraInfo(MF, memoperands(), Symbol, getPostInstrSymbol(),
489               getHeapAllocMarker(), getPCSections(), getCFIType(),
490               getMMRAMetadata());
491}
492
493void MachineInstr::setPostInstrSymbol(MachineFunction &MF, MCSymbol *Symbol) {
494  // Do nothing if old and new symbols are the same.
495 if (Symbol == getPostInstrSymbol())
496 return;
497
498 // If there was only one symbol and we're removing it, just clear info.
499 if (!Symbol && Info.is<EIIK_PostInstrSymbol>()) {
500 Info.clear();
501 return;
502 }
503
504  setExtraInfo(MF, memoperands(), getPreInstrSymbol(), Symbol,
505               getHeapAllocMarker(), getPCSections(), getCFIType(),
506               getMMRAMetadata());
507}
508
509void MachineInstr::setHeapAllocMarker(MachineFunction &MF, MDNode *Marker) {
510  // Do nothing if old and new markers are the same.
511 if (Marker == getHeapAllocMarker())
512 return;
513
514  setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
515               Marker, getPCSections(), getCFIType(), getMMRAMetadata());
516}
517
518void MachineInstr::setPCSections(MachineFunction &MF, MDNode *PCSections) {
519  // Do nothing if the old and new metadata are the same.
520 if (PCSections == getPCSections())
521 return;
522
523 setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
524               getHeapAllocMarker(), PCSections, getCFIType(),
525               getMMRAMetadata());
526}
527
528void MachineInstr::setCFIType(MachineFunction &MF, uint32_t Type) {
529  // Do nothing if old and new types are the same.
530 if (Type == getCFIType())
531 return;
532
533  setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
534               getHeapAllocMarker(), getPCSections(), Type, getMMRAMetadata());
535}
536
537void MachineInstr::setMMRAMetadata(MachineFunction &MF, MDNode *MMRAs) {
538  // Do nothing if old and new metadata are the same.
539 if (MMRAs == getMMRAMetadata())
540 return;
541
542  setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
543               getHeapAllocMarker(), getPCSections(), getCFIType(), MMRAs);
544}
545
546void MachineInstr::cloneInstrSymbols(MachineFunction &MF,
547                                     const MachineInstr &MI) {
548 if (this == &MI)
549 // Nothing to do for a self-clone!
550 return;
551
552 assert(&MF == MI.getMF() &&
553 "Invalid machine functions when cloning instruction symbols!");
554
555 setPreInstrSymbol(MF, MI.getPreInstrSymbol());
556 setPostInstrSymbol(MF, MI.getPostInstrSymbol());
557 setHeapAllocMarker(MF, MI.getHeapAllocMarker());
558 setPCSections(MF, MI.getPCSections());
559 setMMRAMetadata(MF, MI.getMMRAMetadata());
560}
561
562uint32_t MachineInstr::mergeFlagsWith(const MachineInstr &Other) const {
563  // For now, just return the union of the flags. If the flags get more
564  // complicated over time, we might need more logic here.
565 return getFlags() | Other.getFlags();
566}
567
568uint32_t MachineInstr::copyFlagsFromInstruction(const Instruction &I) {
569  uint32_t MIFlags = 0;
570 // Copy the wrapping flags.
571  if (const OverflowingBinaryOperator *OB =
572          dyn_cast<OverflowingBinaryOperator>(&I)) {
573    if (OB->hasNoSignedWrap())
574      MIFlags |= MachineInstr::MIFlag::NoSWrap;
575    if (OB->hasNoUnsignedWrap())
576      MIFlags |= MachineInstr::MIFlag::NoUWrap;
577  } else if (const TruncInst *TI = dyn_cast<TruncInst>(&I)) {
578    if (TI->hasNoSignedWrap())
579      MIFlags |= MachineInstr::MIFlag::NoSWrap;
580    if (TI->hasNoUnsignedWrap())
581      MIFlags |= MachineInstr::MIFlag::NoUWrap;
582  } else if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I)) {
583    if (GEP->hasNoUnsignedSignedWrap())
584      MIFlags |= MachineInstr::MIFlag::NoUSWrap;
585    if (GEP->hasNoUnsignedWrap())
586      MIFlags |= MachineInstr::MIFlag::NoUWrap;
587  }
588
589  // Copy the nonneg flag.
590  if (const PossiblyNonNegInst *PNI = dyn_cast<PossiblyNonNegInst>(&I)) {
591    if (PNI->hasNonNeg())
592      MIFlags |= MachineInstr::MIFlag::NonNeg;
593    // Copy the disjoint flag.
594  } else if (const PossiblyDisjointInst *PD =
595                 dyn_cast<PossiblyDisjointInst>(&I)) {
596    if (PD->isDisjoint())
597      MIFlags |= MachineInstr::MIFlag::Disjoint;
598  }
599
600  // Copy the samesign flag.
601  if (const ICmpInst *ICmp = dyn_cast<ICmpInst>(&I))
602    if (ICmp->hasSameSign())
603      MIFlags |= MachineInstr::MIFlag::SameSign;
604
605  // Copy the exact flag.
606  if (const PossiblyExactOperator *PE = dyn_cast<PossiblyExactOperator>(&I))
607    if (PE->isExact())
608      MIFlags |= MachineInstr::MIFlag::IsExact;
609
610  // Copy the fast-math flags.
611  if (const FPMathOperator *FP = dyn_cast<FPMathOperator>(&I)) {
612    const FastMathFlags Flags = FP->getFastMathFlags();
613    if (Flags.noNaNs())
614      MIFlags |= MachineInstr::MIFlag::FmNoNans;
615    if (Flags.noInfs())
616      MIFlags |= MachineInstr::MIFlag::FmNoInfs;
617    if (Flags.noSignedZeros())
618      MIFlags |= MachineInstr::MIFlag::FmNsz;
619    if (Flags.allowReciprocal())
620      MIFlags |= MachineInstr::MIFlag::FmArcp;
621    if (Flags.allowContract())
622      MIFlags |= MachineInstr::MIFlag::FmContract;
623    if (Flags.approxFunc())
624      MIFlags |= MachineInstr::MIFlag::FmAfn;
625    if (Flags.allowReassoc())
626      MIFlags |= MachineInstr::MIFlag::FmReassoc;
627  }
628
629  if (I.getMetadata(LLVMContext::MD_unpredictable))
630    MIFlags |= MachineInstr::MIFlag::Unpredictable;
631
632 return MIFlags;
633}
634
635void MachineInstr::copyIRFlags(const Instruction &I) {
636  Flags = copyFlagsFromInstruction(I);
637}
638
639bool MachineInstr::hasPropertyInBundle(uint64_t Mask, QueryType Type) const {
640 assert(!isBundledWithPred() && "Must be called on bundle header");
641  for (MachineBasicBlock::const_instr_iterator MII = getIterator();; ++MII) {
642    if (MII->getDesc().getFlags() & Mask) {
643 if (Type == AnyInBundle)
644 return true;
645 } else {
646 if (Type == AllInBundle && !MII->isBundle())
647 return false;
648 }
649 // This was the last instruction in the bundle.
650 if (!MII->isBundledWithSucc())
651 return Type == AllInBundle;
652 }
653}
654
655bool MachineInstr::isIdenticalTo(const MachineInstr &Other,
656                                 MICheckType Check) const {
657 // If opcodes or number of operands are not the same then the two
658 // instructions are obviously not identical.
659 if (Other.getOpcode() != getOpcode() ||
660 Other.getNumOperands() != getNumOperands())
661 return false;
662
663 if (isBundle()) {
664 // We have passed the test above that both instructions have the same
665 // opcode, so we know that both instructions are bundles here. Let's compare
666 // MIs inside the bundle.
667 assert(Other.isBundle() && "Expected that both instructions are bundles.");
668    MachineBasicBlock::const_instr_iterator I1 = getIterator();
669    MachineBasicBlock::const_instr_iterator I2 = Other.getIterator();
670    // Loop until we analysed the last instruction inside at least one of the
671 // bundles.
672 while (I1->isBundledWithSucc() && I2->isBundledWithSucc()) {
673 ++I1;
674 ++I2;
675 if (!I1->isIdenticalTo(*I2, Check))
676 return false;
677 }
678 // If we've reached the end of just one of the two bundles, but not both,
679 // the instructions are not identical.
680 if (I1->isBundledWithSucc() || I2->isBundledWithSucc())
681 return false;
682 }
683
684 // Check operands to make sure they match.
685 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
686 const MachineOperand &MO = getOperand(i);
687 const MachineOperand &OMO = Other.getOperand(i);
688 if (!MO.isReg()) {
689 if (!MO.isIdenticalTo(OMO))
690 return false;
691 continue;
692 }
693
694 // Clients may or may not want to ignore defs when testing for equality.
695 // For example, machine CSE pass only cares about finding common
696 // subexpressions, so it's safe to ignore virtual register defs.
697 if (MO.isDef()) {
698 if (Check == IgnoreDefs)
699 continue;
700 else if (Check == IgnoreVRegDefs) {
701 if (!MO.getReg().isVirtual() || !OMO.getReg().isVirtual())
702 if (!MO.isIdenticalTo(OMO))
703 return false;
704 } else {
705 if (!MO.isIdenticalTo(OMO))
706 return false;
707 if (Check == CheckKillDead && MO.isDead() != OMO.isDead())
708 return false;
709 }
710 } else {
711 if (!MO.isIdenticalTo(OMO))
712 return false;
713 if (Check == CheckKillDead && MO.isKill() != OMO.isKill())
714 return false;
715 }
716 }
717 // If DebugLoc does not match then two debug instructions are not identical.
718 if (isDebugInstr())
719 if (getDebugLoc() && Other.getDebugLoc() &&
720 getDebugLoc() != Other.getDebugLoc())
721 return false;
722 // If pre- or post-instruction symbols do not match then the two instructions
723 // are not identical.
724 if (getPreInstrSymbol() != Other.getPreInstrSymbol() ||
725 getPostInstrSymbol() != Other.getPostInstrSymbol())
726 return false;
727 // Call instructions with different CFI types are not identical.
728 if (isCall() && getCFIType() != Other.getCFIType())
729 return false;
730
731 return true;
732}
733
734bool MachineInstr::isEquivalentDbgInstr(const MachineInstr &Other) const {
735  if (!isDebugValueLike() || !Other.isDebugValueLike())
736 return false;
737 if (getDebugLoc() != Other.getDebugLoc())
738 return false;
739 if (getDebugVariable() != Other.getDebugVariable())
740 return false;
741 if (getNumDebugOperands() != Other.getNumDebugOperands())
742 return false;
743 for (unsigned OpIdx = 0; OpIdx < getNumDebugOperands(); ++OpIdx)
744 if (!getDebugOperand(OpIdx).isIdenticalTo(Other.getDebugOperand(OpIdx)))
745 return false;
746  if (!DIExpression::isEqualExpression(getDebugExpression(),
747                                       isIndirectDebugValue(),
748                                       Other.getDebugExpression(), Other.isIndirectDebugValue()))
749 return false;
750 return true;
751}
752
753const MachineFunction *MachineInstr::getMF() const {
754  return getParent()->getParent();
755}
756
757MachineInstr *MachineInstr::removeFromParent() {
758  assert(getParent() && "Not embedded in a basic block!");
759 return getParent()->remove(this);
760}
761
762MachineInstr *MachineInstr::removeFromBundle() {
763  assert(getParent() && "Not embedded in a basic block!");
764 return getParent()->remove_instr(this);
765}
766
767void MachineInstr::eraseFromParent() {
768  assert(getParent() && "Not embedded in a basic block!");
769 getParent()->erase(this);
770}
771
772void MachineInstr::eraseFromBundle() {
773  assert(getParent() && "Not embedded in a basic block!");
774 getParent()->erase_instr(this);
775}
776
778 if (!isCall(Type))
779 return false;
780 switch (getOpcode()) {
781 case TargetOpcode::PATCHPOINT:
782 case TargetOpcode::STACKMAP:
783 case TargetOpcode::STATEPOINT:
784 case TargetOpcode::FENTRY_CALL:
785 return false;
786 }
787 return true;
788}
789
791 if (isBundle())
794}
795
796unsigned MachineInstr::getNumExplicitOperands() const {
797  unsigned NumOperands = MCID->getNumOperands();
798 if (!MCID->isVariadic())
799 return NumOperands;
800
801 for (unsigned I = NumOperands, E = getNumOperands(); I != E; ++I) {
802 const MachineOperand &MO = getOperand(I);
803 // The operands must always be in the following order:
804 // - explicit reg defs,
805 // - other explicit operands (reg uses, immediates, etc.),
806 // - implicit reg defs
807 // - implicit reg uses
808 if (MO.isReg() && MO.isImplicit())
809 break;
810 ++NumOperands;
811 }
812 return NumOperands;
813}
814
815unsigned MachineInstr::getNumExplicitDefs() const {
816  unsigned NumDefs = MCID->getNumDefs();
817 if (!MCID->isVariadic())
818 return NumDefs;
819
820 for (unsigned I = NumDefs, E = getNumOperands(); I != E; ++I) {
821 const MachineOperand &MO = getOperand(I);
822 if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
823 break;
824 ++NumDefs;
825 }
826 return NumDefs;
827}
828
829void MachineInstr::bundleWithPred() {
830  assert(!isBundledWithPred() && "MI is already bundled with its predecessor");
831  setFlag(BundledPred);
832  MachineBasicBlock::instr_iterator Pred = getIterator();
833  --Pred;
834 assert(!Pred->isBundledWithSucc() && "Inconsistent bundle flags");
835 Pred->setFlag(BundledSucc);
836}
837
838void MachineInstr::bundleWithSucc() {
839  assert(!isBundledWithSucc() && "MI is already bundled with its successor");
840  setFlag(BundledSucc);
841  MachineBasicBlock::instr_iterator Succ = getIterator();
842  ++Succ;
843 assert(!Succ->isBundledWithPred() && "Inconsistent bundle flags");
844 Succ->setFlag(BundledPred);
845}
846
847void MachineInstr::unbundleFromPred() {
848  assert(isBundledWithPred() && "MI isn't bundled with its predecessor");
849  clearFlag(BundledPred);
850  MachineBasicBlock::instr_iterator Pred = getIterator();
851  --Pred;
852 assert(Pred->isBundledWithSucc() && "Inconsistent bundle flags");
853 Pred->clearFlag(BundledSucc);
854}
855
856void MachineInstr::unbundleFromSucc() {
857  assert(isBundledWithSucc() && "MI isn't bundled with its successor");
858  clearFlag(BundledSucc);
859  MachineBasicBlock::instr_iterator Succ = getIterator();
860  ++Succ;
861 assert(Succ->isBundledWithPred() && "Inconsistent bundle flags");
862 Succ->clearFlag(BundledPred);
863}
864
865bool MachineInstr::isStackAligningInlineAsm() const {
866  if (isInlineAsm()) {
867 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
868 if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
869 return true;
870 }
871 return false;
872}
873
874InlineAsm::AsmDialect MachineInstr::getInlineAsmDialect() const {
875  assert(isInlineAsm() && "getInlineAsmDialect() only works for inline asms!");
876 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
877 return InlineAsm::AsmDialect((ExtraInfo & InlineAsm::Extra_AsmDialect) != 0);
878}
879
880int MachineInstr::findInlineAsmFlagIdx(unsigned OpIdx,
881                                       unsigned *GroupNo) const {
882 assert(isInlineAsm() && "Expected an inline asm instruction");
883 assert(OpIdx < getNumOperands() && "OpIdx out of range");
884
885 // Ignore queries about the initial operands.
886  if (OpIdx < InlineAsm::MIOp_FirstOperand)
887    return -1;
888
889 unsigned Group = 0;
890 unsigned NumOps;
891 for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
892 i += NumOps) {
893 const MachineOperand &FlagMO = getOperand(i);
894 // If we reach the implicit register operands, stop looking.
895 if (!FlagMO.isImm())
896 return -1;
897 const InlineAsm::Flag F(FlagMO.getImm());
898 NumOps = 1 + F.getNumOperandRegisters();
899 if (i + NumOps > OpIdx) {
900 if (GroupNo)
901 *GroupNo = Group;
902 return i;
903 }
904 ++Group;
905 }
906 return -1;
907}
908
909const DILabel *MachineInstr::getDebugLabel() const {
910  assert(isDebugLabel() && "not a DBG_LABEL");
911 return cast<DILabel>(getOperand(0).getMetadata());
912}
913
914const MachineOperand &MachineInstr::getDebugVariableOp() const {
915  assert((isDebugValueLike()) && "not a DBG_VALUE*");
916 unsigned VariableOp = isNonListDebugValue() ? 2 : 0;
917 return getOperand(VariableOp);
918}
919
920MachineOperand &MachineInstr::getDebugVariableOp() {
921  assert((isDebugValueLike()) && "not a DBG_VALUE*");
922 unsigned VariableOp = isNonListDebugValue() ? 2 : 0;
923 return getOperand(VariableOp);
924}
925
926const DILocalVariable *MachineInstr::getDebugVariable() const {
927  return cast<DILocalVariable>(getDebugVariableOp().getMetadata());
928}
929
930const MachineOperand &MachineInstr::getDebugExpressionOp() const {
931  assert((isDebugValueLike()) && "not a DBG_VALUE*");
932 unsigned ExpressionOp = isNonListDebugValue() ? 3 : 1;
933 return getOperand(ExpressionOp);
934}
935
936MachineOperand &MachineInstr::getDebugExpressionOp() {
937  assert((isDebugValueLike()) && "not a DBG_VALUE*");
938 unsigned ExpressionOp = isNonListDebugValue() ? 3 : 1;
939 return getOperand(ExpressionOp);
940}
941
942const DIExpression *MachineInstr::getDebugExpression() const {
943  return cast<DIExpression>(getDebugExpressionOp().getMetadata());
944}
945
946bool MachineInstr::isDebugEntryValue() const {
947  return isDebugValue() && getDebugExpression()->isEntryValue();
948}
949
952 const TargetInstrInfo *TII,
953 const TargetRegisterInfo *TRI) const {
954 assert(getParent() && "Can't have an MBB reference here!");
955 assert(getMF() && "Can't have an MF reference here!");
956 const MachineFunction &MF = *getMF();
957
958 // Most opcodes have fixed constraints in their MCInstrDesc.
959 if (!isInlineAsm())
960 return TII->getRegClass(getDesc(), OpIdx, TRI, MF);
961
962 if (!getOperand(OpIdx).isReg())
963 return nullptr;
964
965 // For tied uses on inline asm, get the constraint from the def.
966 unsigned DefIdx;
967 if (getOperand(OpIdx).isUse() && isRegTiedToDefOperand(OpIdx, &DefIdx))
968 OpIdx = DefIdx;
969
970 // Inline asm stores register class constraints in the flag word.
971 int FlagIdx = findInlineAsmFlagIdx(OpIdx);
972 if (FlagIdx < 0)
973 return nullptr;
974
975 const InlineAsm::Flag F(getOperand(FlagIdx).getImm());
976 unsigned RCID;
977 if ((F.isRegUseKind() || F.isRegDefKind() || F.isRegDefEarlyClobberKind()) &&
978 F.hasRegClassConstraint(RCID))
979 return TRI->getRegClass(RCID);
980
981 // Assume that all registers in a memory operand are pointers.
982 if (F.isMemKind())
983 return TRI->getPointerRegClass(MF);
984
985 return nullptr;
986}
987
988const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVReg(
989    Register Reg, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII,
990 const TargetRegisterInfo *TRI, bool ExploreBundle) const {
991  // Check every operand inside the bundle if we have
992  // been asked to.
993 if (ExploreBundle)
994 for (ConstMIBundleOperands OpndIt(*this); OpndIt.isValid() && CurRC;
995 ++OpndIt)
996 CurRC = OpndIt->getParent()->getRegClassConstraintEffectForVRegImpl(
997 OpndIt.getOperandNo(), Reg, CurRC, TII, TRI);
998 else
999 // Otherwise, just check the current operands.
1000 for (unsigned i = 0, e = NumOperands; i < e && CurRC; ++i)
1001 CurRC = getRegClassConstraintEffectForVRegImpl(i, Reg, CurRC, TII, TRI);
1002 return CurRC;
1003}
1004
1005const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVRegImpl(
1006 unsigned OpIdx, Register Reg, const TargetRegisterClass *CurRC,
1007 const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
1008 assert(CurRC && "Invalid initial register class");
1009 // Check if Reg is constrained by some of its use/def from MI.
1010 const MachineOperand &MO = getOperand(OpIdx);
1011 if (!MO.isReg() || MO.getReg() != Reg)
1012 return CurRC;
1013 // If yes, accumulate the constraints through the operand.
1014 return getRegClassConstraintEffect(OpIdx, CurRC, TII, TRI);
1015}
1016
1017const TargetRegisterClass *MachineInstr::getRegClassConstraintEffect(
1018    unsigned OpIdx, const TargetRegisterClass *CurRC,
1019 const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
1020 const TargetRegisterClass *OpRC = getRegClassConstraint(OpIdx, TII, TRI);
1021 const MachineOperand &MO = getOperand(OpIdx);
1022 assert(MO.isReg() &&
1023 "Cannot get register constraints for non-register operand");
1024 assert(CurRC && "Invalid initial register class");
1025 if (unsigned SubIdx = MO.getSubReg()) {
1026 if (OpRC)
1027 CurRC = TRI->getMatchingSuperRegClass(CurRC, OpRC, SubIdx);
1028 else
1029 CurRC = TRI->getSubClassWithSubReg(CurRC, SubIdx);
1030 } else if (OpRC)
1031 CurRC = TRI->getCommonSubClass(CurRC, OpRC);
1032 return CurRC;
1033}
1034
1035/// Return the number of instructions inside the MI bundle, not counting the
1036/// header instruction.
1037unsigned MachineInstr::getBundleSize() const {
1038  MachineBasicBlock::const_instr_iterator I = getIterator();
1039  unsigned Size = 0;
1040 while (I->isBundledWithSucc()) {
1041 ++Size;
1042 ++I;
1043 }
1044 return Size;
1045}
1046
1047/// Returns true if the MachineInstr has an implicit-use operand of exactly
1048/// the given register (not considering sub/super-registers).
1049bool MachineInstr::hasRegisterImplicitUseOperand(Register Reg) const {
1050  for (const MachineOperand &MO : implicit_operands()) {
1051 if (MO.isReg() && MO.isUse() && MO.getReg() == Reg)
1052 return true;
1053 }
1054 return false;
1055}
1056
1057/// findRegisterUseOperandIdx() - Returns the index of the operand that is a
1058/// use of the specified register or -1 if it is not found. It further tightens
1059/// the search criteria to a use that kills the register if isKill is true.
1060int MachineInstr::findRegisterUseOperandIdx(Register Reg,
1061                                            const TargetRegisterInfo *TRI,
1062 bool isKill) const {
1063 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
1064 const MachineOperand &MO = getOperand(i);
1065 if (!MO.isReg() || !MO.isUse())
1066 continue;
1067 Register MOReg = MO.getReg();
1068 if (!MOReg)
1069 continue;
1070 if (MOReg == Reg || (TRI && Reg && MOReg && TRI->regsOverlap(MOReg, Reg)))
1071 if (!isKill || MO.isKill())
1072 return i;
1073 }
1074 return -1;
1075}
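// Editorial note (not part of the original source): a typical caller looks up
// the index and then edits the operand, e.g. a sketch:
//   int Idx = MI.findRegisterUseOperandIdx(Reg, TRI, /*isKill=*/true);
//   if (Idx != -1)
//     MI.getOperand(Idx).setIsKill(false);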
1076
1077/// readsWritesVirtualRegister - Return a pair of bools (reads, writes)
1078/// indicating if this instruction reads or writes Reg. This also considers
1079/// partial defines.
1080std::pair<bool,bool>
1081MachineInstr::readsWritesVirtualRegister(Register Reg,
1082                                         SmallVectorImpl<unsigned> *Ops) const {
1083 bool PartDef = false; // Partial redefine.
1084 bool FullDef = false; // Full define.
1085 bool Use = false;
1086
1087 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
1088 const MachineOperand &MO = getOperand(i);
1089 if (!MO.isReg() || MO.getReg() != Reg)
1090 continue;
1091 if (Ops)
1092 Ops->push_back(i);
1093 if (MO.isUse())
1094 Use |= !MO.isUndef();
1095 else if (MO.getSubReg() && !MO.isUndef())
1096 // A partial def undef doesn't count as reading the register.
1097 PartDef = true;
1098 else
1099 FullDef = true;
1100 }
1101 // A partial redefine uses Reg unless there is also a full define.
1102 return std::make_pair(Use || (PartDef && !FullDef), PartDef || FullDef);
1103}
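// Editorial note (not part of the original source): for a sub-register write
// such as "%0.sub_lo = ..." without an undef flag, the pair is {true, true}
// (the partial def also reads %0); a full def of %0 with no other uses yields
// {false, true}. "sub_lo" is a placeholder sub-register name.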
1104
1105/// findRegisterDefOperandIdx() - Returns the operand index that is a def of
1106/// the specified register or -1 if it is not found. If isDead is true, defs
1107/// that are not dead are skipped. If TargetRegisterInfo is non-null, then it
1108/// also checks if there is a def of a super-register.
1109int MachineInstr::findRegisterDefOperandIdx(Register Reg,
1110                                            const TargetRegisterInfo *TRI,
1111 bool isDead, bool Overlap) const {
1112 bool isPhys = Reg.isPhysical();
1113 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
1114 const MachineOperand &MO = getOperand(i);
1115 // Accept regmask operands when Overlap is set.
1116 // Ignore them when looking for a specific def operand (Overlap == false).
1117 if (isPhys && Overlap && MO.isRegMask() && MO.clobbersPhysReg(Reg))
1118 return i;
1119 if (!MO.isReg() || !MO.isDef())
1120 continue;
1121 Register MOReg = MO.getReg();
1122 bool Found = (MOReg == Reg);
1123 if (!Found && TRI && isPhys && MOReg.isPhysical()) {
1124 if (Overlap)
1125 Found = TRI->regsOverlap(MOReg, Reg);
1126 else
1127 Found = TRI->isSubRegister(MOReg, Reg);
1128 }
1129 if (Found && (!isDead || MO.isDead()))
1130 return i;
1131 }
1132 return -1;
1133}
1134
1135/// findFirstPredOperandIdx() - Find the index of the first operand in the
1136/// operand list that is used to represent the predicate. It returns -1 if
1137/// none is found.
1138int MachineInstr::findFirstPredOperandIdx() const {
1139  // Don't call MCID.findFirstPredOperandIdx() because this variant
1140 // is sometimes called on an instruction that's not yet complete, and
1141 // so the number of operands is less than the MCID indicates. In
1142 // particular, the PTX target does this.
1143 const MCInstrDesc &MCID = getDesc();
1144 if (MCID.isPredicable()) {
1145 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
1146 if (MCID.operands()[i].isPredicate())
1147 return i;
1148 }
1149
1150 return -1;
1151}
1152
1153// MachineOperand::TiedTo is 4 bits wide.
1154const unsigned TiedMax = 15;
1155
1156/// tieOperands - Mark operands at DefIdx and UseIdx as tied to each other.
1157///
1158/// Use and def operands can be tied together, indicated by a non-zero TiedTo
1159/// field. TiedTo can have these values:
1160///
1161/// 0: Operand is not tied to anything.
1162/// 1 to TiedMax-1: Tied to getOperand(TiedTo-1).
1163/// TiedMax: Tied to an operand >= TiedMax-1.
1164///
1165/// The tied def must be one of the first TiedMax operands on a normal
1166/// instruction. INLINEASM instructions allow more tied defs.
1167///
1168void MachineInstr::tieOperands(unsigned DefIdx, unsigned UseIdx) {
1169 MachineOperand &DefMO = getOperand(DefIdx);
1170 MachineOperand &UseMO = getOperand(UseIdx);
1171 assert(DefMO.isDef() && "DefIdx must be a def operand");
1172 assert(UseMO.isUse() && "UseIdx must be a use operand");
1173 assert(!DefMO.isTied() && "Def is already tied to another use");
1174 assert(!UseMO.isTied() && "Use is already tied to another def");
1175
1176 if (DefIdx < TiedMax)
1177 UseMO.TiedTo = DefIdx + 1;
1178 else {
1179 // Inline asm can use the group descriptors to find tied operands,
1180 // statepoint tied operands are trivial to match (1-1 reg def with reg use),
1181 // but on normal instruction, the tied def must be within the first TiedMax
1182 // operands.
1183 assert((isInlineAsm() || getOpcode() == TargetOpcode::STATEPOINT) &&
1184 "DefIdx out of range");
1185 UseMO.TiedTo = TiedMax;
1186 }
1187
1188 // UseIdx can be out of range, we'll search for it in findTiedOperandIdx().
1189 DefMO.TiedTo = std::min(UseIdx + 1, TiedMax);
1190}
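// Editorial note (not part of the original source): for a two-address
// instruction whose def is operand 0 and whose tied use is operand 1,
// tieOperands(0, 1) stores TiedTo = 1 (DefIdx + 1) on the use and
// TiedTo = 2 (UseIdx + 1) on the def, so findTiedOperandIdx() below can map
// either operand back to its partner.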
1191
1192/// Given the index of a tied register operand, find the operand it is tied to.
1193/// Defs are tied to uses and vice versa. Returns the index of the tied operand
1194/// which must exist.
1195unsigned MachineInstr::findTiedOperandIdx(unsigned OpIdx) const {
1196 const MachineOperand &MO = getOperand(OpIdx);
1197 assert(MO.isTied() && "Operand isn't tied");
1198
1199 // Normally TiedTo is in range.
1200 if (MO.TiedTo < TiedMax)
1201 return MO.TiedTo - 1;
1202
1203 // Uses on normal instructions can be out of range.
1204 if (!isInlineAsm() && getOpcode() != TargetOpcode::STATEPOINT) {
1205 // Normal tied defs must be in the 0..TiedMax-1 range.
1206 if (MO.isUse())
1207 return TiedMax - 1;
1208 // MO is a def. Search for the tied use.
1209 for (unsigned i = TiedMax - 1, e = getNumOperands(); i != e; ++i) {
1210 const MachineOperand &UseMO = getOperand(i);
1211 if (UseMO.isReg() && UseMO.isUse() && UseMO.TiedTo == OpIdx + 1)
1212 return i;
1213 }
1214 llvm_unreachable("Can't find tied use");
1215 }
1216
1217 if (getOpcode() == TargetOpcode::STATEPOINT) {
1218 // In STATEPOINT defs correspond 1-1 to GC pointer operands passed
1219 // on registers.
1220 StatepointOpers SO(this);
1221 unsigned CurUseIdx = SO.getFirstGCPtrIdx();
1222 assert(CurUseIdx != -1U && "only gc pointer statepoint operands can be tied");
1223 unsigned NumDefs = getNumDefs();
1224 for (unsigned CurDefIdx = 0; CurDefIdx < NumDefs; ++CurDefIdx) {
1225 while (!getOperand(CurUseIdx).isReg())
1226 CurUseIdx = StackMaps::getNextMetaArgIdx(this, CurUseIdx);
1227 if (OpIdx == CurDefIdx)
1228 return CurUseIdx;
1229 if (OpIdx == CurUseIdx)
1230 return CurDefIdx;
1231 CurUseIdx = StackMaps::getNextMetaArgIdx(this, CurUseIdx);
1232 }
1233 llvm_unreachable("Can't find tied use");
1234 }
1235
1236 // Now deal with inline asm by parsing the operand group descriptor flags.
1237 // Find the beginning of each operand group.
1238 SmallVector<unsigned, 8> GroupIdx;
1239 unsigned OpIdxGroup = ~0u;
1240 unsigned NumOps;
1241 for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
1242 i += NumOps) {
1243 const MachineOperand &FlagMO = getOperand(i);
1244 assert(FlagMO.isImm() && "Invalid tied operand on inline asm");
1245 unsigned CurGroup = GroupIdx.size();
1246 GroupIdx.push_back(i);
1247 const InlineAsm::Flag F(FlagMO.getImm());
1248 NumOps = 1 + F.getNumOperandRegisters();
1249 // OpIdx belongs to this operand group.
1250 if (OpIdx > i && OpIdx < i + NumOps)
1251 OpIdxGroup = CurGroup;
1252 unsigned TiedGroup;
1253 if (!F.isUseOperandTiedToDef(TiedGroup))
1254 continue;
1255 // Operands in this group are tied to operands in TiedGroup which must be
1256 // earlier. Find the number of operands between the two groups.
1257 unsigned Delta = i - GroupIdx[TiedGroup];
1258
1259 // OpIdx is a use tied to TiedGroup.
1260 if (OpIdxGroup == CurGroup)
1261 return OpIdx - Delta;
1262
1263 // OpIdx is a def tied to this use group.
1264 if (OpIdxGroup == TiedGroup)
1265 return OpIdx + Delta;
1266 }
1267 llvm_unreachable("Invalid tied operand on inline asm");
1268}
1269
1270/// clearKillInfo - Clears kill flags on all operands.
1271///
1272void MachineInstr::clearKillInfo() {
1273  for (MachineOperand &MO : operands()) {
1274 if (MO.isReg() && MO.isUse())
1275 MO.setIsKill(false);
1276 }
1277}
1278
1279void MachineInstr::substituteRegister(Register FromReg, Register ToReg,
1280                                      unsigned SubIdx,
1281 const TargetRegisterInfo &RegInfo) {
1282 if (ToReg.isPhysical()) {
1283 if (SubIdx)
1284 ToReg = RegInfo.getSubReg(ToReg, SubIdx);
1285 for (MachineOperand &MO : operands()) {
1286 if (!MO.isReg() || MO.getReg() != FromReg)
1287 continue;
1288 MO.substPhysReg(ToReg, RegInfo);
1289 }
1290 } else {
1291 for (MachineOperand &MO : operands()) {
1292 if (!MO.isReg() || MO.getReg() != FromReg)
1293 continue;
1294 MO.substVirtReg(ToReg, SubIdx, RegInfo);
1295 }
1296 }
1297}
1298
1299/// isSafeToMove - Return true if it is safe to move this instruction. If
1300/// SawStore is set to true, it means that there is a store (or call) between
1301/// the instruction's location and its intended destination.
1302bool MachineInstr::isSafeToMove(bool &SawStore) const {
1303 // Ignore stuff that we obviously can't move.
1304 //
1305 // Treat volatile loads as stores. This is not strictly necessary for
1306 // volatiles, but it is required for atomic loads. It is not allowed to move
1307 // a load across an atomic load with Ordering > Monotonic.
1308 if (mayStore() || isCall() || isPHI() ||
1309 (mayLoad() && hasOrderedMemoryRef())) {
1310 SawStore = true;
1311 return false;
1312 }
1313
1314  if (isPosition() || isDebugInstr() || isTerminator() ||
1315      mayRaiseFPException() || hasUnmodeledSideEffects() ||
1316      isJumpTableDebugInfo())
1317    return false;
1318
1319 // See if this instruction does a load. If so, we have to guarantee that the
1320  // loaded value doesn't change between the load and its intended
1321 // destination. The check for isInvariantLoad gives the target the chance to
1322 // classify the load as always returning a constant, e.g. a constant pool
1323 // load.
1324  if (mayLoad() && !isDereferenceableInvariantLoad())
1325    // Otherwise, this is a real load. If there is a store between the load and
1326 // end of block, we can't move it.
1327 return !SawStore;
1328
1329 return true;
1330}
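// Editorial note (not part of the original source): callers usually thread
// SawStore through a forward scan over the candidate region, e.g. a sketch:
//   bool SawStore = false;
//   for (MachineInstr &MI : llvm::make_range(Begin, End))
//     if (!MI.isSafeToMove(SawStore))
//       break;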
1331
1332bool MachineInstr::wouldBeTriviallyDead() const {
1333  // Don't delete frame allocation labels.
1334 // FIXME: Why is LOCAL_ESCAPE not considered in MachineInstr::isLabel?
1335 if (getOpcode() == TargetOpcode::LOCAL_ESCAPE)
1336 return false;
1337
1338 // Don't delete FAKE_USE.
1339 // FIXME: Why is FAKE_USE not considered in MachineInstr::isPosition?
1340 if (isFakeUse())
1341 return false;
1342
1343 // LIFETIME markers should be preserved.
1344 // FIXME: Why are LIFETIME markers not considered in MachineInstr::isPosition?
1345 if (isLifetimeMarker())
1346 return false;
1347
1348 // If we can move an instruction, we can remove it. Otherwise, it has
1349 // a side-effect of some sort.
1350 bool SawStore = false;
1351 return isPHI() || isSafeToMove(SawStore);
1352}
1353
1354bool MachineInstr::isDead(const MachineRegisterInfo &MRI,
1355                          LiveRegUnits *LivePhysRegs) const {
1356 // Instructions without side-effects are dead iff they only define dead regs.
1357 // This function is hot and this loop returns early in the common case,
1358 // so only perform additional checks before this if absolutely necessary.
1359 for (const MachineOperand &MO : all_defs()) {
1360 Register Reg = MO.getReg();
1361 if (Reg.isPhysical()) {
1362 // Don't delete live physreg defs, or any reserved register defs.
1363 if (!LivePhysRegs || !LivePhysRegs->available(Reg) || MRI.isReserved(Reg))
1364 return false;
1365 } else {
1366 if (MO.isDead())
1367 continue;
1368 for (const MachineInstr &Use : MRI.use_nodbg_instructions(Reg)) {
1369 if (&Use != this)
1370 // This def has a non-debug use. Don't delete the instruction!
1371 return false;
1372 }
1373 }
1374 }
1375
1376 // Technically speaking inline asm without side effects and no defs can still
1377 // be deleted. But there is so much bad inline asm code out there, we should
1378 // let them be.
1379 if (isInlineAsm())
1380 return false;
1381
1382 // FIXME: See issue #105950 for why LIFETIME markers are considered dead here.
1383 if (isLifetimeMarker())
1384 return true;
1385
1386 // If there are no defs with uses, then we call the instruction dead so long
1387  // as we do not suspect it may have side effects.
1388 return wouldBeTriviallyDead();
1389}
1390
1391static bool MemOperandsHaveAlias(const MachineFrameInfo &MFI,
1392                                 BatchAAResults *AA, bool UseTBAA,
1393 const MachineMemOperand *MMOa,
1394 const MachineMemOperand *MMOb) {
1395 // The following interface to AA is fashioned after DAGCombiner::isAlias and
1396 // operates with MachineMemOperand offset with some important assumptions:
1397 // - LLVM fundamentally assumes flat address spaces.
1398 // - MachineOperand offset can *only* result from legalization and cannot
1399 // affect queries other than the trivial case of overlap checking.
1400 // - These offsets never wrap and never step outside of allocated objects.
1401 // - There should never be any negative offsets here.
1402 //
1403 // FIXME: Modify API to hide this math from "user"
1404 // Even before we go to AA we can reason locally about some memory objects. It
1405 // can save compile time, and possibly catch some corner cases not currently
1406 // covered.
1407
1408 int64_t OffsetA = MMOa->getOffset();
1409 int64_t OffsetB = MMOb->getOffset();
1410 int64_t MinOffset = std::min(OffsetA, OffsetB);
1411
1412 LocationSize WidthA = MMOa->getSize();
1413 LocationSize WidthB = MMOb->getSize();
1414 bool KnownWidthA = WidthA.hasValue();
1415 bool KnownWidthB = WidthB.hasValue();
1416 bool BothMMONonScalable = !WidthA.isScalable() && !WidthB.isScalable();
1417
1418 const Value *ValA = MMOa->getValue();
1419 const Value *ValB = MMOb->getValue();
1420 bool SameVal = (ValA && ValB && (ValA == ValB));
1421 if (!SameVal) {
1422 const PseudoSourceValue *PSVa = MMOa->getPseudoValue();
1423 const PseudoSourceValue *PSVb = MMOb->getPseudoValue();
1424 if (PSVa && ValB && !PSVa->mayAlias(&MFI))
1425 return false;
1426 if (PSVb && ValA && !PSVb->mayAlias(&MFI))
1427 return false;
1428 if (PSVa && PSVb && (PSVa == PSVb))
1429 SameVal = true;
1430 }
1431
1432 if (SameVal && BothMMONonScalable) {
1433 if (!KnownWidthA || !KnownWidthB)
1434 return true;
1435 int64_t MaxOffset = std::max(OffsetA, OffsetB);
1436 int64_t LowWidth = (MinOffset == OffsetA)
1437 ? WidthA.getValue().getKnownMinValue()
1438 : WidthB.getValue().getKnownMinValue();
1439 return (MinOffset + LowWidth > MaxOffset);
1440 }
1441
1442 if (!AA)
1443 return true;
1444
1445 if (!ValA || !ValB)
1446 return true;
1447
1448 assert((OffsetA >= 0) && "Negative MachineMemOperand offset");
1449 assert((OffsetB >= 0) && "Negative MachineMemOperand offset");
1450
1451 // If Scalable Location Size has non-zero offset, Width + Offset does not work
1452 // at the moment
1453 if ((WidthA.isScalable() && OffsetA > 0) ||
1454 (WidthB.isScalable() && OffsetB > 0))
1455 return true;
1456
1457 int64_t OverlapA =
1458      KnownWidthA ? WidthA.getValue().getKnownMinValue() + OffsetA - MinOffset
1459                  : -1;
1460  int64_t OverlapB =
1461      KnownWidthB ? WidthB.getValue().getKnownMinValue() + OffsetB - MinOffset
1462                  : -1;
1463
1464 LocationSize LocA = (WidthA.isScalable() || !KnownWidthA)
1465 ? WidthA
1466 : LocationSize::precise(OverlapA);
1467 LocationSize LocB = (WidthB.isScalable() || !KnownWidthB)
1468 ? WidthB
1469 : LocationSize::precise(OverlapB);
1470
1471 return !AA->isNoAlias(
1472 MemoryLocation(ValA, LocA, UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
1473 MemoryLocation(ValB, LocB, UseTBAA ? MMOb->getAAInfo() : AAMDNodes()));
1474}
1475
1476bool MachineInstr::mayAlias(BatchAAResults *AA, const MachineInstr &Other,
1477                            bool UseTBAA) const {
1478 const MachineFunction *MF = getMF();
1479  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
1480  const MachineFrameInfo &MFI = MF->getFrameInfo();
1481
1482 // Exclude call instruction which may alter the memory but can not be handled
1483 // by this function.
1484 if (isCall() || Other.isCall())
1485 return true;
1486
1487 // If neither instruction stores to memory, they can't alias in any
1488 // meaningful way, even if they read from the same address.
1489 if (!mayStore() && !Other.mayStore())
1490 return false;
1491
1492 // Both instructions must be memory operations to be able to alias.
1493 if (!mayLoadOrStore() || !Other.mayLoadOrStore())
1494 return false;
1495
1496 // Let the target decide if memory accesses cannot possibly overlap.
1497  if (TII->areMemAccessesTriviallyDisjoint(*this, Other))
1498    return false;
1499
1500 // Memory operations without memory operands may access anything. Be
1501 // conservative and assume `MayAlias`.
1502 if (memoperands_empty() || Other.memoperands_empty())
1503 return true;
1504
1505 // Skip if there are too many memory operands.
1506 auto NumChecks = getNumMemOperands() * Other.getNumMemOperands();
1507 if (NumChecks > TII->getMemOperandAACheckLimit())
1508 return true;
1509
1510 // Check each pair of memory operands from both instructions, which can't
1511 // alias only if all pairs won't alias.
1512 for (auto *MMOa : memoperands())
1513 for (auto *MMOb : Other.memoperands())
1514 if (MemOperandsHaveAlias(MFI, AA, UseTBAA, MMOa, MMOb))
1515 return true;
1516
1517 return false;
1518}
1519
1520bool MachineInstr::mayAlias(AAResults *AA, const MachineInstr &Other,
1521                            bool UseTBAA) const {
1522 if (AA) {
1523 BatchAAResults BAA(*AA);
1524 return mayAlias(&BAA, Other, UseTBAA);
1525 }
1526 return mayAlias(static_cast<BatchAAResults *>(nullptr), Other, UseTBAA);
1527}
1528
1529/// hasOrderedMemoryRef - Return true if this instruction may have an ordered
1530/// or volatile memory reference, or if the information describing the memory
1531/// reference is not available. Return false if it is known to have no ordered
1532/// memory references.
1533bool MachineInstr::hasOrderedMemoryRef() const {
1534  // An instruction known never to access memory won't have a volatile access.
1535 if (!mayStore() &&
1536 !mayLoad() &&
1537      !isCall() &&
1538      !hasUnmodeledSideEffects())
1539    return false;
1540
1541 // Otherwise, if the instruction has no memory reference information,
1542 // conservatively assume it wasn't preserved.
1543 if (memoperands_empty())
1544 return true;
1545
1546 // Check if any of our memory operands are ordered.
1547 return llvm::any_of(memoperands(), [](const MachineMemOperand *MMO) {
1548 return !MMO->isUnordered();
1549 });
1550}
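// Editorial note (not part of the original source): a volatile access, or an
// atomic access ordered more strongly than "unordered", leaves a
// MachineMemOperand that is not unordered, so the query above returns true;
// a memory instruction that has lost its memoperands is also reported as
// ordered, conservatively.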
1551
1552/// isDereferenceableInvariantLoad - Return true if this instruction will never
1553/// trap and is loading from a location whose value is invariant across a run of
1554/// this function.
1555bool MachineInstr::isDereferenceableInvariantLoad() const {
1556  // If the instruction doesn't load at all, it isn't an invariant load.
1557 if (!mayLoad())
1558 return false;
1559
1560 // If the instruction has lost its memoperands, conservatively assume that
1561 // it may not be an invariant load.
1562 if (memoperands_empty())
1563 return false;
1564
1565 const MachineFrameInfo &MFI = getParent()->getParent()->getFrameInfo();
1566
1567 for (MachineMemOperand *MMO : memoperands()) {
1568 if (!MMO->isUnordered())
1569 // If the memory operand has ordering side effects, we can't move the
1570 // instruction. Such an instruction is technically an invariant load,
1571 // but the caller code would need updated to expect that.
1572 return false;
1573 if (MMO->isStore()) return false;
1574 if (MMO->isInvariant() && MMO->isDereferenceable())
1575 continue;
1576
1577 // A load from a constant PseudoSourceValue is invariant.
1578 if (const PseudoSourceValue *PSV = MMO->getPseudoValue()) {
1579 if (PSV->isConstant(&MFI))
1580 continue;
1581 }
1582
1583 // Otherwise assume conservatively.
1584 return false;
1585 }
1586
1587 // Everything checks out.
1588 return true;
1589}
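// Editorial note (not part of the original source): a load whose only
// memoperand is invariant and dereferenceable (for example a constant-pool
// load, whose PseudoSourceValue reports isConstant()) passes the checks
// above; any store, ordered access, or missing memoperand makes the query
// return false.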
1590
1591Register MachineInstr::isConstantValuePHI() const {
1592  if (!isPHI())
1593 return {};
1594 assert(getNumOperands() >= 3 &&
1595 "It's illegal to have a PHI without source operands");
1596
1597 Register Reg = getOperand(1).getReg();
1598 for (unsigned i = 3, e = getNumOperands(); i < e; i += 2)
1599 if (getOperand(i).getReg() != Reg)
1600 return {};
1601 return Reg;
1602}
1603
1604bool MachineInstr::hasUnmodeledSideEffects() const {
1605  if (hasProperty(MCID::UnmodeledSideEffects))
1606    return true;
1607 if (isInlineAsm()) {
1608 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
1609 if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
1610 return true;
1611 }
1612
1613 return false;
1614}
1615
1616bool MachineInstr::isLoadFoldBarrier() const {
1617  return mayStore() || isCall() ||
1618         (hasUnmodeledSideEffects() && !isPseudoProbe());
1619}
1620
1621/// allDefsAreDead - Return true if all the defs of this instruction are dead.
1622///
1623bool MachineInstr::allDefsAreDead() const {
1624  for (const MachineOperand &MO : operands()) {
1625 if (!MO.isReg() || MO.isUse())
1626 continue;
1627 if (!MO.isDead())
1628 return false;
1629 }
1630 return true;
1631}
1632
1633bool MachineInstr::allImplicitDefsAreDead() const {
1634  for (const MachineOperand &MO : implicit_operands()) {
1635 if (!MO.isReg() || MO.isUse())
1636 continue;
1637 if (!MO.isDead())
1638 return false;
1639 }
1640 return true;
1641}
1642
1643/// copyImplicitOps - Copy implicit register operands from specified
1644/// instruction to this instruction.
1645void MachineInstr::copyImplicitOps(MachineFunction &MF,
1646                                   const MachineInstr &MI) {
1647 for (const MachineOperand &MO :
1648 llvm::drop_begin(MI.operands(), MI.getDesc().getNumOperands()))
1649 if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask())
1650 addOperand(MF, MO);
1651}
1652
1653bool MachineInstr::hasComplexRegisterTies() const {
1654  const MCInstrDesc &MCID = getDesc();
1655 if (MCID.Opcode == TargetOpcode::STATEPOINT)
1656 return true;
1657 for (unsigned I = 0, E = getNumOperands(); I < E; ++I) {
1658 const auto &Operand = getOperand(I);
1659 if (!Operand.isReg() || Operand.isDef())
1660 // Ignore the defined registers as MCID marks only the uses as tied.
1661 continue;
1662 int ExpectedTiedIdx = MCID.getOperandConstraint(I, MCOI::TIED_TO);
1663 int TiedIdx = Operand.isTied() ? int(findTiedOperandIdx(I)) : -1;
1664 if (ExpectedTiedIdx != TiedIdx)
1665 return true;
1666 }
1667 return false;
1668}
1669
1670LLT MachineInstr::getTypeToPrint(unsigned OpIdx, SmallBitVector &PrintedTypes,
1671                                 const MachineRegisterInfo &MRI) const {
1672 const MachineOperand &Op = getOperand(OpIdx);
1673 if (!Op.isReg())
1674 return LLT{};
1675
1676 if (isVariadic() || OpIdx >= getNumExplicitOperands())
1677 return MRI.getType(Op.getReg());
1678
1679 auto &OpInfo = getDesc().operands()[OpIdx];
1680 if (!OpInfo.isGenericType())
1681 return MRI.getType(Op.getReg());
1682
1683 if (PrintedTypes[OpInfo.getGenericTypeIndex()])
1684 return LLT{};
1685
1686 LLT TypeToPrint = MRI.getType(Op.getReg());
1687 // Don't mark the type index printed if it wasn't actually printed: maybe
1688 // another operand with the same type index has an actual type attached:
1689 if (TypeToPrint.isValid())
1690 PrintedTypes.set(OpInfo.getGenericTypeIndex());
1691 return TypeToPrint;
1692}
1693
1694#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1695LLVM_DUMP_METHOD void MachineInstr::dump() const {
1696  dbgs() << " ";
1697 print(dbgs());
1698}
1699
1700LLVM_DUMP_METHOD void MachineInstr::dumprImpl(
1701 const MachineRegisterInfo &MRI, unsigned Depth, unsigned MaxDepth,
1702 SmallPtrSetImpl<const MachineInstr *> &AlreadySeenInstrs) const {
1703 if (Depth >= MaxDepth)
1704 return;
1705 if (!AlreadySeenInstrs.insert(this).second)
1706 return;
1707 // PadToColumn always inserts at least one space.
1708 // Don't mess up the alignment if we don't want any space.
1709 if (Depth)
1710 fdbgs().PadToColumn(Depth * 2);
1711 print(fdbgs());
1712 for (const MachineOperand &MO : operands()) {
1713 if (!MO.isReg() || MO.isDef())
1714 continue;
1715 Register Reg = MO.getReg();
1716 if (Reg.isPhysical())
1717 continue;
1718 const MachineInstr *NewMI = MRI.getUniqueVRegDef(Reg);
1719 if (NewMI == nullptr)
1720 continue;
1721 NewMI->dumprImpl(MRI, Depth + 1, MaxDepth, AlreadySeenInstrs);
1722 }
1723}
1724
1725LLVM_DUMP_METHOD void MachineInstr::dumpr(const MachineRegisterInfo &MRI,
1726                                          unsigned MaxDepth) const {
1727 SmallPtrSet<const MachineInstr *, 16> AlreadySeenInstrs;
1728 dumprImpl(MRI, 0, MaxDepth, AlreadySeenInstrs);
1729}
1730#endif
1731
1732void MachineInstr::print(raw_ostream &OS, bool IsStandalone, bool SkipOpers,
1733 bool SkipDebugLoc, bool AddNewLine,
1734 const TargetInstrInfo *TII) const {
1735 const Module *M = nullptr;
1736 const Function *F = nullptr;
1737 if (const MachineFunction *MF = getMFIfAvailable(*this)) {
1738 F = &MF->getFunction();
1739 M = F->getParent();
1740 if (!TII)
1741 TII = MF->getSubtarget().getInstrInfo();
1742 }
1743
1744 ModuleSlotTracker MST(M);
1745 if (F)
1746 MST.incorporateFunction(*F);
1747 print(OS, MST, IsStandalone, SkipOpers, SkipDebugLoc, AddNewLine, TII);
1748}
1749
1750void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
1751                         bool IsStandalone, bool SkipOpers, bool SkipDebugLoc,
1752 bool AddNewLine, const TargetInstrInfo *TII) const {
1753 // We can be a bit tidier if we know the MachineFunction.
1754 const TargetRegisterInfo *TRI = nullptr;
1755 const MachineRegisterInfo *MRI = nullptr;
1756 const TargetIntrinsicInfo *IntrinsicInfo = nullptr;
1757 tryToGetTargetInfo(*this, TRI, MRI, IntrinsicInfo, TII);
1758
1759 if (isCFIInstruction())
1760 assert(getNumOperands() == 1 && "Expected 1 operand in CFI instruction");
1761
1762 SmallBitVector PrintedTypes(8);
1763 bool ShouldPrintRegisterTies = IsStandalone || hasComplexRegisterTies();
1764 auto getTiedOperandIdx = [&](unsigned OpIdx) {
1765 if (!ShouldPrintRegisterTies)
1766 return 0U;
1767 const MachineOperand &MO = getOperand(OpIdx);
1768 if (MO.isReg() && MO.isTied() && !MO.isDef())
1769 return findTiedOperandIdx(OpIdx);
1770 return 0U;
1771 };
1772 unsigned StartOp = 0;
1773 unsigned e = getNumOperands();
1774
1775 // Print explicitly defined operands on the left of an assignment syntax.
1776 while (StartOp < e) {
1777 const MachineOperand &MO = getOperand(StartOp);
1778 if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
1779 break;
1780
1781 if (StartOp != 0)
1782 OS << ", ";
1783
1784 LLT TypeToPrint = MRI ? getTypeToPrint(StartOp, PrintedTypes, *MRI) : LLT{};
1785 unsigned TiedOperandIdx = getTiedOperandIdx(StartOp);
1786 MO.print(OS, MST, TypeToPrint, StartOp, /*PrintDef=*/false, IsStandalone,
1787 ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1788 ++StartOp;
1789 }
1790
1791 if (StartOp != 0)
1792 OS << " = ";
1793
1794  if (getFlag(MachineInstr::FrameSetup))
1795    OS << "frame-setup ";
1796  if (getFlag(MachineInstr::FrameDestroy))
1797    OS << "frame-destroy ";
1798  if (getFlag(MachineInstr::FmNoNans))
1799    OS << "nnan ";
1800  if (getFlag(MachineInstr::FmNoInfs))
1801    OS << "ninf ";
1802  if (getFlag(MachineInstr::FmNsz))
1803    OS << "nsz ";
1804  if (getFlag(MachineInstr::FmArcp))
1805    OS << "arcp ";
1806  if (getFlag(MachineInstr::FmContract))
1807    OS << "contract ";
1808  if (getFlag(MachineInstr::FmAfn))
1809    OS << "afn ";
1810  if (getFlag(MachineInstr::FmReassoc))
1811    OS << "reassoc ";
1812  if (getFlag(MachineInstr::NoUWrap))
1813    OS << "nuw ";
1814  if (getFlag(MachineInstr::NoSWrap))
1815    OS << "nsw ";
1816  if (getFlag(MachineInstr::IsExact))
1817    OS << "exact ";
1818  if (getFlag(MachineInstr::NoFPExcept))
1819    OS << "nofpexcept ";
1820  if (getFlag(MachineInstr::NoMerge))
1821    OS << "nomerge ";
1822  if (getFlag(MachineInstr::NonNeg))
1823    OS << "nneg ";
1824  if (getFlag(MachineInstr::Disjoint))
1825    OS << "disjoint ";
1826  if (getFlag(MachineInstr::SameSign))
1827    OS << "samesign ";
1828
1829 // Print the opcode name.
1830 if (TII)
1831 OS << TII->getName(getOpcode());
1832 else
1833 OS << "UNKNOWN";
1834
1835 if (SkipOpers)
1836 return;
1837
1838 // Print the rest of the operands.
1839 bool FirstOp = true;
1840 unsigned AsmDescOp = ~0u;
1841 unsigned AsmOpCount = 0;
1842
1843  if (isInlineAsm() && e >= InlineAsm::MIOp_FirstOperand) {
1844    // Print asm string.
1845 OS << " ";
1846 const unsigned OpIdx = InlineAsm::MIOp_AsmString;
1847 LLT TypeToPrint = MRI ? getTypeToPrint(OpIdx, PrintedTypes, *MRI) : LLT{};
1848 unsigned TiedOperandIdx = getTiedOperandIdx(OpIdx);
1849 getOperand(OpIdx).print(OS, MST, TypeToPrint, OpIdx, /*PrintDef=*/true, IsStandalone,
1850 ShouldPrintRegisterTies, TiedOperandIdx, TRI,
1851 IntrinsicInfo);
1852
1853 // Print HasSideEffects, MayLoad, MayStore, IsAlignStack
1854 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
1855 if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
1856 OS << " [sideeffect]";
1857 if (ExtraInfo & InlineAsm::Extra_MayLoad)
1858 OS << " [mayload]";
1859 if (ExtraInfo & InlineAsm::Extra_MayStore)
1860 OS << " [maystore]";
1861 if (ExtraInfo & InlineAsm::Extra_IsConvergent)
1862 OS << " [isconvergent]";
1863 if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
1864 OS << " [alignstack]";
1865    if (getInlineAsmDialect() == InlineAsm::AD_ATT)
1866      OS << " [attdialect]";
1867    if (getInlineAsmDialect() == InlineAsm::AD_Intel)
1868      OS << " [inteldialect]";
1869
1870 StartOp = AsmDescOp = InlineAsm::MIOp_FirstOperand;
1871 FirstOp = false;
1872 }
1873
1874 for (unsigned i = StartOp, e = getNumOperands(); i != e; ++i) {
1875 const MachineOperand &MO = getOperand(i);
1876
1877 if (FirstOp) FirstOp = false; else OS << ",";
1878 OS << " ";
1879
1880 if (isDebugValueLike() && MO.isMetadata()) {
1881 // Pretty print DBG_VALUE* instructions.
1882 auto *DIV = dyn_cast<DILocalVariable>(MO.getMetadata());
1883 if (DIV && !DIV->getName().empty())
1884 OS << "!\"" << DIV->getName() << '\"';
1885 else {
1886 LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
1887 unsigned TiedOperandIdx = getTiedOperandIdx(i);
1888 MO.print(OS, MST, TypeToPrint, i, /*PrintDef=*/true, IsStandalone,
1889 ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1890 }
1891 } else if (isDebugLabel() && MO.isMetadata()) {
1892 // Pretty print DBG_LABEL instructions.
1893 auto *DIL = dyn_cast<DILabel>(MO.getMetadata());
1894 if (DIL && !DIL->getName().empty())
1895 OS << "\"" << DIL->getName() << '\"';
1896 else {
1897 LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
1898 unsigned TiedOperandIdx = getTiedOperandIdx(i);
1899 MO.print(OS, MST, TypeToPrint, i, /*PrintDef=*/true, IsStandalone,
1900 ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1901 }
1902 } else if (i == AsmDescOp && MO.isImm()) {
1903 // Pretty print the inline asm operand descriptor.
1904 OS << '$' << AsmOpCount++;
1905 unsigned Flag = MO.getImm();
1906 const InlineAsm::Flag F(Flag);
1907 OS << ":[";
1908 OS << F.getKindName();
1909
1910 unsigned RCID;
1911 if (!F.isImmKind() && !F.isMemKind() && F.hasRegClassConstraint(RCID)) {
1912 if (TRI) {
1913 OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
1914 } else
1915 OS << ":RC" << RCID;
1916 }
1917
1918 if (F.isMemKind()) {
1919 const InlineAsm::ConstraintCode MCID = F.getMemoryConstraintID();
1920 OS << ":" << InlineAsm::getMemConstraintName(MCID);
1921 }
1922
1923 unsigned TiedTo;
1924 if (F.isUseOperandTiedToDef(TiedTo))
1925 OS << " tiedto:$" << TiedTo;
1926
1927 if ((F.isRegDefKind() || F.isRegDefEarlyClobberKind() ||
1928 F.isRegUseKind()) &&
1929 F.getRegMayBeFolded()) {
1930 OS << " foldable";
1931 }
1932
1933 OS << ']';
1934
1935 // Compute the index of the next operand descriptor.
1936 AsmDescOp += 1 + F.getNumOperandRegisters();
1937 } else {
1938 LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
1939 unsigned TiedOperandIdx = getTiedOperandIdx(i);
1940      if (MO.isImm() && isOperandSubregIdx(i))
1941        MachineOperand::printSubRegIdx(OS, MO.getImm(), TRI);
1942      else
1943 MO.print(OS, MST, TypeToPrint, i, /*PrintDef=*/true, IsStandalone,
1944 ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1945 }
1946 }
1947
1948 // Print any optional symbols attached to this instruction as-if they were
1949 // operands.
1950 if (MCSymbol *PreInstrSymbol = getPreInstrSymbol()) {
1951 if (!FirstOp) {
1952 FirstOp = false;
1953 OS << ',';
1954 }
1955 OS << " pre-instr-symbol ";
1956 MachineOperand::printSymbol(OS, *PreInstrSymbol);
1957 }
1958 if (MCSymbol *PostInstrSymbol = getPostInstrSymbol()) {
1959 if (!FirstOp) {
1960 FirstOp = false;
1961 OS << ',';
1962 }
1963 OS << " post-instr-symbol ";
1964 MachineOperand::printSymbol(OS, *PostInstrSymbol);
1965 }
1966 if (MDNode *HeapAllocMarker = getHeapAllocMarker()) {
1967 if (!FirstOp) {
1968 FirstOp = false;
1969 OS << ',';
1970 }
1971 OS << " heap-alloc-marker ";
1972 HeapAllocMarker->printAsOperand(OS, MST);
1973 }
1974 if (MDNode *PCSections = getPCSections()) {
1975 if (!FirstOp) {
1976 FirstOp = false;
1977 OS << ',';
1978 }
1979 OS << " pcsections ";
1980 PCSections->printAsOperand(OS, MST);
1981 }
1982 if (MDNode *MMRA = getMMRAMetadata()) {
1983 if (!FirstOp) {
1984 FirstOp = false;
1985 OS << ',';
1986 }
1987 OS << " mmra ";
1988 MMRA->printAsOperand(OS, MST);
1989 }
1990 if (uint32_t CFIType = getCFIType()) {
1991 if (!FirstOp)
1992 OS << ',';
1993 OS << " cfi-type " << CFIType;
1994 }
1995
1996 if (DebugInstrNum) {
1997 if (!FirstOp)
1998 OS << ",";
1999 OS << " debug-instr-number " << DebugInstrNum;
2000 }
2001
2002 if (!SkipDebugLoc) {
2003 if (const DebugLoc &DL = getDebugLoc()) {
2004 if (!FirstOp)
2005 OS << ',';
2006 OS << " debug-location ";
2007 DL->printAsOperand(OS, MST);
2008 }
2009 }
2010
2011 if (!memoperands_empty()) {
2012    SmallVector<StringRef, 0> SSNs;
2013    const LLVMContext *Context = nullptr;
2014 std::unique_ptr<LLVMContext> CtxPtr;
2015 const MachineFrameInfo *MFI = nullptr;
2016 if (const MachineFunction *MF = getMFIfAvailable(*this)) {
2017 MFI = &MF->getFrameInfo();
2018 Context = &MF->getFunction().getContext();
2019 } else {
2020 CtxPtr = std::make_unique<LLVMContext>();
2021 Context = CtxPtr.get();
2022 }
2023
2024 OS << " :: ";
2025 bool NeedComma = false;
2026 for (const MachineMemOperand *Op : memoperands()) {
2027 if (NeedComma)
2028 OS << ", ";
2029 Op->print(OS, MST, SSNs, *Context, MFI, TII);
2030 NeedComma = true;
2031 }
2032 }
2033
2034 if (SkipDebugLoc)
2035 return;
2036
2037 bool HaveSemi = false;
2038
2039 // Print debug location information.
2040 if (const DebugLoc &DL = getDebugLoc()) {
2041 if (!HaveSemi) {
2042 OS << ';';
2043 HaveSemi = true;
2044 }
2045 OS << ' ';
2046 DL.print(OS);
2047 }
2048
2049 // Print extra comments for DEBUG_VALUE and friends if they are well-formed.
2050 if ((isNonListDebugValue() && getNumOperands() >= 4) ||
2051 (isDebugValueList() && getNumOperands() >= 2) ||
2052 (isDebugRef() && getNumOperands() >= 3)) {
2053 if (getDebugVariableOp().isMetadata()) {
2054 if (!HaveSemi) {
2055 OS << ";";
2056 HaveSemi = true;
2057 }
2058 auto *DV = getDebugVariable();
2059 OS << " line no:" << DV->getLine();
2060      if (isIndirectDebugValue())
2061        OS << " indirect";
2062 }
2063 }
2064 // TODO: DBG_LABEL
2065
2066 if (AddNewLine)
2067 OS << '\n';
2068}
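// Illustrative usage sketch (added commentary, not part of the LLVM source):
// the raw_ostream overload above is the usual entry point; when the
// instruction is parented it recovers the Module and TargetInstrInfo itself,
// so a caller only needs a stream.
// \code
//   void printForDiagnostics(const MachineInstr &MI) {
//     MI.print(llvm::errs(), /*IsStandalone=*/true, /*SkipOpers=*/false,
//              /*SkipDebugLoc=*/false, /*AddNewLine=*/true);
//   }
// \endcode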
2069
2070bool MachineInstr::addRegisterKilled(Register IncomingReg,
2071                                     const TargetRegisterInfo *RegInfo,
2072 bool AddIfNotFound) {
2073 bool isPhysReg = IncomingReg.isPhysical();
2074 bool hasAliases = isPhysReg &&
2075 MCRegAliasIterator(IncomingReg, RegInfo, false).isValid();
2076 bool Found = false;
2077  SmallVector<unsigned, 4> DeadOps;
2078  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
2079 MachineOperand &MO = getOperand(i);
2080 if (!MO.isReg() || !MO.isUse() || MO.isUndef())
2081 continue;
2082
2083 // DEBUG_VALUE nodes do not contribute to code generation and should
2084 // always be ignored. Failure to do so may result in trying to modify
2085 // KILL flags on DEBUG_VALUE nodes.
2086 if (MO.isDebug())
2087 continue;
2088
2089 Register Reg = MO.getReg();
2090 if (!Reg)
2091 continue;
2092
2093 if (Reg == IncomingReg) {
2094 if (!Found) {
2095 if (MO.isKill())
2096 // The register is already marked kill.
2097 return true;
2098 if (isPhysReg && isRegTiedToDefOperand(i))
2099 // Two-address uses of physregs must not be marked kill.
2100 return true;
2101 MO.setIsKill();
2102 Found = true;
2103 }
2104 } else if (hasAliases && MO.isKill() && Reg.isPhysical()) {
2105 // A super-register kill already exists.
2106 if (RegInfo->isSuperRegister(IncomingReg, Reg))
2107 return true;
2108 if (RegInfo->isSubRegister(IncomingReg, Reg))
2109 DeadOps.push_back(i);
2110 }
2111 }
2112
2113 // Trim unneeded kill operands.
2114 while (!DeadOps.empty()) {
2115 unsigned OpIdx = DeadOps.back();
2116 if (getOperand(OpIdx).isImplicit() &&
2117 (!isInlineAsm() || findInlineAsmFlagIdx(OpIdx) < 0))
2118 removeOperand(OpIdx);
2119 else
2120 getOperand(OpIdx).setIsKill(false);
2121 DeadOps.pop_back();
2122 }
2123
2124 // If not found, this means an alias of one of the operands is killed. Add a
2125 // new implicit operand if required.
2126 if (!Found && AddIfNotFound) {
2127    addOperand(MachineOperand::CreateReg(IncomingReg,
2128                                         false /*IsDef*/,
2129 true /*IsImp*/,
2130 true /*IsKill*/));
2131 return true;
2132 }
2133 return Found;
2134}
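// Illustrative usage sketch (added commentary, not part of the LLVM source):
// a pass that has proven a physical register is last used by MI records the
// kill here; AddIfNotFound covers the case where only an alias is read and a
// new implicit kill operand must be appended.
// \code
//   void markLastUse(MachineInstr &MI, MCRegister PhysReg,
//                    const TargetRegisterInfo *TRI) {
//     MI.addRegisterKilled(PhysReg, TRI, /*AddIfNotFound=*/true);
//   }
// \endcode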
2135
2136void MachineInstr::clearRegisterKills(Register Reg,
2137                                      const TargetRegisterInfo *RegInfo) {
2138 if (!Reg.isPhysical())
2139 RegInfo = nullptr;
2140 for (MachineOperand &MO : operands()) {
2141 if (!MO.isReg() || !MO.isUse() || !MO.isKill())
2142 continue;
2143 Register OpReg = MO.getReg();
2144 if ((RegInfo && RegInfo->regsOverlap(Reg, OpReg)) || Reg == OpReg)
2145 MO.setIsKill(false);
2146 }
2147}
2148
2149bool MachineInstr::addRegisterDead(Register Reg,
2150                                   const TargetRegisterInfo *RegInfo,
2151 bool AddIfNotFound) {
2152 bool isPhysReg = Reg.isPhysical();
2153 bool hasAliases = isPhysReg &&
2154 MCRegAliasIterator(Reg, RegInfo, false).isValid();
2155 bool Found = false;
2156  SmallVector<unsigned, 4> DeadOps;
2157  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
2158 MachineOperand &MO = getOperand(i);
2159 if (!MO.isReg() || !MO.isDef())
2160 continue;
2161 Register MOReg = MO.getReg();
2162 if (!MOReg)
2163 continue;
2164
2165 if (MOReg == Reg) {
2166 MO.setIsDead();
2167 Found = true;
2168 } else if (hasAliases && MO.isDead() && MOReg.isPhysical()) {
2169 // There exists a super-register that's marked dead.
2170 if (RegInfo->isSuperRegister(Reg, MOReg))
2171 return true;
2172 if (RegInfo->isSubRegister(Reg, MOReg))
2173 DeadOps.push_back(i);
2174 }
2175 }
2176
2177 // Trim unneeded dead operands.
2178 while (!DeadOps.empty()) {
2179 unsigned OpIdx = DeadOps.back();
2180 if (getOperand(OpIdx).isImplicit() &&
2181 (!isInlineAsm() || findInlineAsmFlagIdx(OpIdx) < 0))
2182 removeOperand(OpIdx);
2183 else
2184 getOperand(OpIdx).setIsDead(false);
2185 DeadOps.pop_back();
2186 }
2187
2188 // If not found, this means an alias of one of the operands is dead. Add a
2189 // new implicit operand if required.
2190 if (Found || !AddIfNotFound)
2191 return Found;
2192
2193  addOperand(MachineOperand::CreateReg(Reg,
2194                                       true /*IsDef*/,
2195 true /*IsImp*/,
2196 false /*IsKill*/,
2197 true /*IsDead*/));
2198 return true;
2199}
2200
2201void MachineInstr::clearRegisterDeads(Register Reg) {
2202  for (MachineOperand &MO : all_defs())
2203 if (MO.getReg() == Reg)
2204 MO.setIsDead(false);
2205}
2206
2207void MachineInstr::setRegisterDefReadUndef(Register Reg, bool IsUndef) {
2208  for (MachineOperand &MO : all_defs())
2209 if (MO.getReg() == Reg && MO.getSubReg() != 0)
2210 MO.setIsUndef(IsUndef);
2211}
2212
2213void MachineInstr::addRegisterDefined(Register Reg,
2214                                      const TargetRegisterInfo *RegInfo) {
2215 if (Reg.isPhysical()) {
2216 MachineOperand *MO = findRegisterDefOperand(Reg, RegInfo, false, false);
2217 if (MO)
2218 return;
2219 } else {
2220 for (const MachineOperand &MO : all_defs()) {
2221 if (MO.getReg() == Reg && MO.getSubReg() == 0)
2222 return;
2223 }
2224 }
2225  addOperand(MachineOperand::CreateReg(Reg,
2226                                       true /*IsDef*/,
2227 true /*IsImp*/));
2228}
2229
2230void MachineInstr::setPhysRegsDeadExcept(ArrayRef<Register> UsedRegs,
2231                                         const TargetRegisterInfo &TRI) {
2232 bool HasRegMask = false;
2233 for (MachineOperand &MO : operands()) {
2234 if (MO.isRegMask()) {
2235 HasRegMask = true;
2236 continue;
2237 }
2238 if (!MO.isReg() || !MO.isDef()) continue;
2239 Register Reg = MO.getReg();
2240 if (!Reg.isPhysical())
2241 continue;
2242 // If there are no uses, including partial uses, the def is dead.
2243 if (llvm::none_of(UsedRegs,
2244 [&](MCRegister Use) { return TRI.regsOverlap(Use, Reg); }))
2245 MO.setIsDead();
2246 }
2247
2248 // This is a call with a register mask operand.
2249 // Mask clobbers are always dead, so add defs for the non-dead defines.
2250 if (HasRegMask)
2251 for (const Register &UsedReg : UsedRegs)
2252 addRegisterDefined(UsedReg, &TRI);
2253}
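// Illustrative usage sketch (added commentary, not part of the LLVM source):
// call lowering typically builds the call, lists the physregs that carry
// arguments and return values, and then marks every other clobbered physreg
// dead in one step.
// \code
//   void finalizeCallClobbers(MachineInstr &CallMI,
//                             llvm::ArrayRef<Register> LiveRegs,
//                             const TargetRegisterInfo &TRI) {
//     CallMI.setPhysRegsDeadExcept(LiveRegs, TRI);
//   }
// \endcode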
2254
2255unsigned
2256MachineInstrExpressionTrait::getHashValue(const MachineInstr *const &MI) {
2257  // Build up a buffer of hash code components.
2258 SmallVector<size_t, 16> HashComponents;
2259 HashComponents.reserve(MI->getNumOperands() + 1);
2260 HashComponents.push_back(MI->getOpcode());
2261 for (const MachineOperand &MO : MI->operands()) {
2262 if (MO.isReg() && MO.isDef() && MO.getReg().isVirtual())
2263 continue; // Skip virtual register defs.
2264
2265 HashComponents.push_back(hash_value(MO));
2266 }
2267 return hash_combine_range(HashComponents.begin(), HashComponents.end());
2268}
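// Illustrative usage sketch (added commentary, not part of the LLVM source):
// this trait lets CSE-style passes key hash tables by instruction "shape"
// (opcode plus non-vreg-def operands) instead of by pointer identity; the
// alias below is a hypothetical example of such a table.
// \code
//   #include "llvm/ADT/DenseMap.h"
//   using ExprIndexMap =
//       llvm::DenseMap<llvm::MachineInstr *, unsigned,
//                      llvm::MachineInstrExpressionTrait>;
// \endcode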
2269
2270const MDNode *MachineInstr::getLocCookieMD() const {
2271  // Find the source location cookie.
2272 const MDNode *LocMD = nullptr;
2273 for (unsigned i = getNumOperands(); i != 0; --i) {
2274 if (getOperand(i-1).isMetadata() &&
2275 (LocMD = getOperand(i-1).getMetadata()) &&
2276 LocMD->getNumOperands() != 0) {
2277 if (mdconst::hasa<ConstantInt>(LocMD->getOperand(0)))
2278 return LocMD;
2279 }
2280 }
2281
2282 return nullptr;
2283}
2284
2285void MachineInstr::emitInlineAsmError(const Twine &Msg) const {
2286  assert(isInlineAsm() && "Can't decode loc cookies for non-inline asm");
2287  const MDNode *LocMD = getLocCookieMD();
2288 uint64_t LocCookie =
2289 LocMD
2290 ? mdconst::extract<ConstantInt>(LocMD->getOperand(0))->getZExtValue()
2291 : 0;
2292  LLVMContext &Ctx = getMF()->getFunction().getContext();
2293  Ctx.diagnose(DiagnosticInfoInlineAsm(LocCookie, Msg));
2294}
2295
2296void MachineInstr::emitGenericError(const Twine &Msg) const {
2297  const Function &Fn = getMF()->getFunction();
2298  Fn.getContext().diagnose(
2299      DiagnosticInfoGenericWithLoc(Msg, Fn, getDebugLoc()));
2300}
2301
2302MachineInstrBuilder llvm::BuildMI(MachineFunction &MF, const DebugLoc &DL,
2303                                  const MCInstrDesc &MCID, bool IsIndirect,
2304 Register Reg, const MDNode *Variable,
2305 const MDNode *Expr) {
2306 assert(isa<DILocalVariable>(Variable) && "not a variable");
2307 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
2308 assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
2309 "Expected inlined-at fields to agree");
2310 auto MIB = BuildMI(MF, DL, MCID).addReg(Reg);
2311 if (IsIndirect)
2312 MIB.addImm(0U);
2313 else
2314 MIB.addReg(0U);
2315 return MIB.addMetadata(Variable).addMetadata(Expr);
2316}
2317
2318MachineInstrBuilder llvm::BuildMI(MachineFunction &MF, const DebugLoc &DL,
2319                                  const MCInstrDesc &MCID, bool IsIndirect,
2320 ArrayRef<MachineOperand> DebugOps,
2321 const MDNode *Variable, const MDNode *Expr) {
2322 assert(isa<DILocalVariable>(Variable) && "not a variable");
2323 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
2324 assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
2325 "Expected inlined-at fields to agree");
2326 if (MCID.Opcode == TargetOpcode::DBG_VALUE) {
2327 assert(DebugOps.size() == 1 &&
2328 "DBG_VALUE must contain exactly one debug operand");
2329 MachineOperand DebugOp = DebugOps[0];
2330 if (DebugOp.isReg())
2331 return BuildMI(MF, DL, MCID, IsIndirect, DebugOp.getReg(), Variable,
2332 Expr);
2333
2334 auto MIB = BuildMI(MF, DL, MCID).add(DebugOp);
2335 if (IsIndirect)
2336 MIB.addImm(0U);
2337 else
2338 MIB.addReg(0U);
2339 return MIB.addMetadata(Variable).addMetadata(Expr);
2340 }
2341
2342 auto MIB = BuildMI(MF, DL, MCID);
2343 MIB.addMetadata(Variable).addMetadata(Expr);
2344 for (const MachineOperand &DebugOp : DebugOps)
2345 if (DebugOp.isReg())
2346 MIB.addReg(DebugOp.getReg());
2347 else
2348 MIB.add(DebugOp);
2349 return MIB;
2350}
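// Illustrative usage sketch (added commentary, not part of the LLVM source):
// a typical caller lowering a debug intrinsic builds a DBG_VALUE describing a
// value that now lives in VReg; DL, Variable and Expr are assumed to come
// from the intrinsic being lowered.
// \code
//   MachineInstrBuilder emitDbgValue(MachineFunction &MF, const DebugLoc &DL,
//                                    const TargetInstrInfo &TII, Register VReg,
//                                    const DILocalVariable *Variable,
//                                    const DIExpression *Expr) {
//     return BuildMI(MF, DL, TII.get(TargetOpcode::DBG_VALUE),
//                    /*IsIndirect=*/false, VReg, Variable, Expr);
//   }
// \endcode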
2351
2352MachineInstrBuilder llvm::BuildMI(MachineBasicBlock &BB,
2353                                  MachineBasicBlock::iterator I,
2354                                  const DebugLoc &DL, const MCInstrDesc &MCID,
2355 bool IsIndirect, Register Reg,
2356 const MDNode *Variable, const MDNode *Expr) {
2357 MachineFunction &MF = *BB.getParent();
2358 MachineInstr *MI = BuildMI(MF, DL, MCID, IsIndirect, Reg, Variable, Expr);
2359 BB.insert(I, MI);
2360 return MachineInstrBuilder(MF, MI);
2361}
2362
2363MachineInstrBuilder llvm::BuildMI(MachineBasicBlock &BB,
2364                                  MachineBasicBlock::iterator I,
2365                                  const DebugLoc &DL, const MCInstrDesc &MCID,
2366 bool IsIndirect,
2367 ArrayRef<MachineOperand> DebugOps,
2368 const MDNode *Variable, const MDNode *Expr) {
2369 MachineFunction &MF = *BB.getParent();
2370 MachineInstr *MI =
2371 BuildMI(MF, DL, MCID, IsIndirect, DebugOps, Variable, Expr);
2372 BB.insert(I, MI);
2373 return MachineInstrBuilder(MF, *MI);
2374}
2375
2376/// Compute the new DIExpression to use with a DBG_VALUE for a spill slot.
2377/// This prepends DW_OP_deref when spilling an indirect DBG_VALUE.
2378static const DIExpression *computeExprForSpill(
2379    const MachineInstr &MI,
2380 const SmallVectorImpl<const MachineOperand *> &SpilledOperands) {
2381 assert(MI.getDebugVariable()->isValidLocationForIntrinsic(MI.getDebugLoc()) &&
2382 "Expected inlined-at fields to agree");
2383
2384 const DIExpression *Expr = MI.getDebugExpression();
2385 if (MI.isIndirectDebugValue()) {
2386 assert(MI.getDebugOffset().getImm() == 0 &&
2387 "DBG_VALUE with nonzero offset");
2388    Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
2389  } else if (MI.isDebugValueList()) {
2390 // We will replace the spilled register with a frame index, so
2391 // immediately deref all references to the spilled register.
2392 std::array<uint64_t, 1> Ops{{dwarf::DW_OP_deref}};
2393 for (const MachineOperand *Op : SpilledOperands) {
2394 unsigned OpIdx = MI.getDebugOperandIndex(Op);
2395 Expr = DIExpression::appendOpsToArg(Expr, Ops, OpIdx);
2396 }
2397 }
2398 return Expr;
2399}
2400static const DIExpression *computeExprForSpill(const MachineInstr &MI,
2401                                               Register SpillReg) {
2402 assert(MI.hasDebugOperandForReg(SpillReg) && "Spill Reg is not used in MI.");
2403  SmallVector<const MachineOperand *, 1> SpillOperands;
2404  for (const MachineOperand &Op : MI.getDebugOperandsForReg(SpillReg))
2405 SpillOperands.push_back(&Op);
2406 return computeExprForSpill(MI, SpillOperands);
2407}
2408
2409MachineInstr *llvm::buildDbgValueForSpill(MachineBasicBlock &BB,
2410                                          MachineBasicBlock::iterator I,
2411                                          const MachineInstr &Orig,
2412 int FrameIndex, Register SpillReg) {
2413 assert(!Orig.isDebugRef() &&
2414 "DBG_INSTR_REF should not reference a virtual register.");
2415 const DIExpression *Expr = computeExprForSpill(Orig, SpillReg);
2416 MachineInstrBuilder NewMI =
2417 BuildMI(BB, I, Orig.getDebugLoc(), Orig.getDesc());
2418 // Non-Variadic Operands: Location, Offset, Variable, Expression
2419 // Variadic Operands: Variable, Expression, Locations...
2420 if (Orig.isNonListDebugValue())
2421 NewMI.addFrameIndex(FrameIndex).addImm(0U);
2422 NewMI.addMetadata(Orig.getDebugVariable()).addMetadata(Expr);
2423 if (Orig.isDebugValueList()) {
2424 for (const MachineOperand &Op : Orig.debug_operands())
2425 if (Op.isReg() && Op.getReg() == SpillReg)
2426 NewMI.addFrameIndex(FrameIndex);
2427 else
2428 NewMI.add(MachineOperand(Op));
2429 }
2430 return NewMI;
2431}
2432MachineInstr *llvm::buildDbgValueForSpill(
2433    MachineBasicBlock &BB, MachineBasicBlock::iterator I,
2434    const MachineInstr &Orig, int FrameIndex,
2435 const SmallVectorImpl<const MachineOperand *> &SpilledOperands) {
2436 const DIExpression *Expr = computeExprForSpill(Orig, SpilledOperands);
2437 MachineInstrBuilder NewMI =
2438 BuildMI(BB, I, Orig.getDebugLoc(), Orig.getDesc());
2439 // Non-Variadic Operands: Location, Offset, Variable, Expression
2440 // Variadic Operands: Variable, Expression, Locations...
2441 if (Orig.isNonListDebugValue())
2442 NewMI.addFrameIndex(FrameIndex).addImm(0U);
2443 NewMI.addMetadata(Orig.getDebugVariable()).addMetadata(Expr);
2444 if (Orig.isDebugValueList()) {
2445 for (const MachineOperand &Op : Orig.debug_operands())
2446 if (is_contained(SpilledOperands, &Op))
2447 NewMI.addFrameIndex(FrameIndex);
2448 else
2449 NewMI.add(MachineOperand(Op));
2450 }
2451 return NewMI;
2452}
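// Illustrative usage sketch (added commentary, not part of the LLVM source):
// register allocators call buildDbgValueForSpill right after spilling so the
// debug value points at the stack slot rather than the now-dead register;
// the original DBG_VALUE is usually erased by the caller afterwards.
// \code
//   void rewriteDbgValueAfterSpill(MachineBasicBlock &MBB,
//                                  MachineBasicBlock::iterator InsertPt,
//                                  const MachineInstr &OldDV, int FI,
//                                  Register SpilledReg) {
//     buildDbgValueForSpill(MBB, InsertPt, OldDV, FI, SpilledReg);
//   }
// \endcode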
2453
2454void llvm::updateDbgValueForSpill(MachineInstr &Orig, int FrameIndex,
2455                                  Register Reg) {
2456 const DIExpression *Expr = computeExprForSpill(Orig, Reg);
2457 if (Orig.isNonListDebugValue())
2458    Orig.getDebugOffset().ChangeToImmediate(0U);
2459  for (MachineOperand &Op : Orig.getDebugOperandsForReg(Reg))
2460 Op.ChangeToFrameIndex(FrameIndex);
2461 Orig.getDebugExpressionOp().setMetadata(Expr);
2462}
2463
2464void MachineInstr::collectDebugValues(
2465    SmallVectorImpl<MachineInstr *> &DbgValues) {
2466  MachineInstr &MI = *this;
2467 if (!MI.getOperand(0).isReg())
2468 return;
2469
2470  MachineBasicBlock::iterator DI = MI; ++DI;
2471  for (MachineBasicBlock::iterator DE = MI.getParent()->end();
2472 DI != DE; ++DI) {
2473 if (!DI->isDebugValue())
2474 return;
2475 if (DI->hasDebugOperandForReg(MI.getOperand(0).getReg()))
2476 DbgValues.push_back(&*DI);
2477 }
2478}
2479
2480void MachineInstr::changeDebugValuesDefReg(Register Reg) {
2481  // Collect matching debug values.
2482  SmallVector<MachineInstr *, 2> DbgValues;
2483
2484 if (!getOperand(0).isReg())
2485 return;
2486
2487 Register DefReg = getOperand(0).getReg();
2488 auto *MRI = getRegInfo();
2489 for (auto &MO : MRI->use_operands(DefReg)) {
2490 auto *DI = MO.getParent();
2491 if (!DI->isDebugValue())
2492 continue;
2493 if (DI->hasDebugOperandForReg(DefReg)) {
2494 DbgValues.push_back(DI);
2495 }
2496 }
2497
2498 // Propagate Reg to debug value instructions.
2499 for (auto *DBI : DbgValues)
2500 for (MachineOperand &Op : DBI->getDebugOperandsForReg(DefReg))
2501 Op.setReg(Reg);
2502}
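// Illustrative usage sketch (added commentary, not part of the LLVM source):
// when an optimization replaces the value an instruction defines (for
// example by rematerializing it into NewReg), redirecting the users'
// DBG_VALUEs keeps the variable location alive.
// \code
//   void retargetDebugUsers(MachineInstr &OldDef, Register NewReg) {
//     OldDef.changeDebugValuesDefReg(NewReg);
//   }
// \endcode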
2503
2504using MMOList = SmallVector<const MachineMemOperand *, 2>;
2505
2506static LocationSize getSpillSlotSize(const MMOList &Accesses,
2507                                     const MachineFrameInfo &MFI) {
2508 uint64_t Size = 0;
2509 for (const auto *A : Accesses) {
2510 if (MFI.isSpillSlotObjectIndex(
2511 cast<FixedStackPseudoSourceValue>(A->getPseudoValue())
2512 ->getFrameIndex())) {
2513 LocationSize S = A->getSize();
2514 if (!S.hasValue())
2515        return LocationSize::beforeOrAfterPointer();
2516      Size += S.getValue();
2517 }
2518 }
2519 return Size;
2520}
2521
2522std::optional<LocationSize>
2523MachineInstr::getSpillSize(const TargetInstrInfo *TII) const {
2524  int FI;
2525 if (TII->isStoreToStackSlotPostFE(*this, FI)) {
2526 const MachineFrameInfo &MFI = getMF()->getFrameInfo();
2527 if (MFI.isSpillSlotObjectIndex(FI))
2528 return (*memoperands_begin())->getSize();
2529 }
2530 return std::nullopt;
2531}
2532
2533std::optional<LocationSize>
2534MachineInstr::getFoldedSpillSize(const TargetInstrInfo *TII) const {
2535  MMOList Accesses;
2536 if (TII->hasStoreToStackSlot(*this, Accesses))
2537 return getSpillSlotSize(Accesses, getMF()->getFrameInfo());
2538 return std::nullopt;
2539}
2540
2541std::optional<LocationSize>
2542MachineInstr::getRestoreSize(const TargetInstrInfo *TII) const {
2543  int FI;
2544 if (TII->isLoadFromStackSlotPostFE(*this, FI)) {
2545 const MachineFrameInfo &MFI = getMF()->getFrameInfo();
2546 if (MFI.isSpillSlotObjectIndex(FI))
2547 return (*memoperands_begin())->getSize();
2548 }
2549 return std::nullopt;
2550}
2551
2552std::optional<LocationSize>
2553MachineInstr::getFoldedRestoreSize(const TargetInstrInfo *TII) const {
2554  MMOList Accesses;
2555 if (TII->hasLoadFromStackSlot(*this, Accesses))
2556 return getSpillSlotSize(Accesses, getMF()->getFrameInfo());
2557 return std::nullopt;
2558}
2559
2560unsigned MachineInstr::getDebugInstrNum() {
2561  if (DebugInstrNum == 0)
2562 DebugInstrNum = getParent()->getParent()->getNewDebugInstrNum();
2563 return DebugInstrNum;
2564}
2565
2566unsigned MachineInstr::getDebugInstrNum(MachineFunction &MF) {
2567  if (DebugInstrNum == 0)
2568 DebugInstrNum = MF.getNewDebugInstrNum();
2569 return DebugInstrNum;
2570}
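// Illustrative usage sketch (added commentary, not part of the LLVM source):
// instruction referencing (DBG_INSTR_REF) names a value as a pair of
// <instruction number, operand index>; the number is allocated lazily the
// first time a def is labelled.
// \code
//   unsigned labelDefForInstrRef(MachineInstr &DefMI) {
//     return DefMI.getDebugInstrNum(); // assigns a fresh nonzero ID if needed
//   }
// \endcode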
2571
2572std::tuple<LLT, LLT> MachineInstr::getFirst2LLTs() const {
2573 return std::tuple(getRegInfo()->getType(getOperand(0).getReg()),
2574 getRegInfo()->getType(getOperand(1).getReg()));
2575}
2576
2577std::tuple<LLT, LLT, LLT> MachineInstr::getFirst3LLTs() const {
2578 return std::tuple(getRegInfo()->getType(getOperand(0).getReg()),
2579 getRegInfo()->getType(getOperand(1).getReg()),
2580 getRegInfo()->getType(getOperand(2).getReg()));
2581}
2582
2583std::tuple<LLT, LLT, LLT, LLT> MachineInstr::getFirst4LLTs() const {
2584 return std::tuple(getRegInfo()->getType(getOperand(0).getReg()),
2585 getRegInfo()->getType(getOperand(1).getReg()),
2586 getRegInfo()->getType(getOperand(2).getReg()),
2587 getRegInfo()->getType(getOperand(3).getReg()));
2588}
2589
2590std::tuple<LLT, LLT, LLT, LLT, LLT> MachineInstr::getFirst5LLTs() const {
2591 return std::tuple(getRegInfo()->getType(getOperand(0).getReg()),
2592 getRegInfo()->getType(getOperand(1).getReg()),
2593 getRegInfo()->getType(getOperand(2).getReg()),
2594 getRegInfo()->getType(getOperand(3).getReg()),
2595 getRegInfo()->getType(getOperand(4).getReg()));
2596}
2597
2598std::tuple<Register, LLT, Register, LLT>
2599MachineInstr::getFirst2RegLLTs() const {
2600  Register Reg0 = getOperand(0).getReg();
2601 Register Reg1 = getOperand(1).getReg();
2602 return std::tuple(Reg0, getRegInfo()->getType(Reg0), Reg1,
2603 getRegInfo()->getType(Reg1));
2604}
2605
2606std::tuple<Register, LLT, Register, LLT, Register, LLT>
2607MachineInstr::getFirst3RegLLTs() const {
2608  Register Reg0 = getOperand(0).getReg();
2609 Register Reg1 = getOperand(1).getReg();
2610 Register Reg2 = getOperand(2).getReg();
2611 return std::tuple(Reg0, getRegInfo()->getType(Reg0), Reg1,
2612 getRegInfo()->getType(Reg1), Reg2,
2613 getRegInfo()->getType(Reg2));
2614}
2615
2616std::tuple<Register, LLT, Register, LLT, Register, LLT, Register, LLT>
2617MachineInstr::getFirst4RegLLTs() const {
2618  Register Reg0 = getOperand(0).getReg();
2619 Register Reg1 = getOperand(1).getReg();
2620 Register Reg2 = getOperand(2).getReg();
2621 Register Reg3 = getOperand(3).getReg();
2622 return std::tuple(
2623 Reg0, getRegInfo()->getType(Reg0), Reg1, getRegInfo()->getType(Reg1),
2624 Reg2, getRegInfo()->getType(Reg2), Reg3, getRegInfo()->getType(Reg3));
2625}
2626
2627std::tuple<Register, LLT, Register, LLT, Register, LLT, Register, LLT, Register,
2628           LLT>
2629MachineInstr::getFirst5RegLLTs() const {
2630  Register Reg0 = getOperand(0).getReg();
2631 Register Reg1 = getOperand(1).getReg();
2632 Register Reg2 = getOperand(2).getReg();
2633 Register Reg3 = getOperand(3).getReg();
2634 Register Reg4 = getOperand(4).getReg();
2635 return std::tuple(
2636 Reg0, getRegInfo()->getType(Reg0), Reg1, getRegInfo()->getType(Reg1),
2637 Reg2, getRegInfo()->getType(Reg2), Reg3, getRegInfo()->getType(Reg3),
2638 Reg4, getRegInfo()->getType(Reg4));
2639}
2640
2641void MachineInstr::insert(mop_iterator InsertBefore,
2642                          ArrayRef<MachineOperand> Ops) {
2643  assert(InsertBefore != nullptr && "invalid iterator");
2644 assert(InsertBefore->getParent() == this &&
2645 "iterator points to operand of other inst");
2646 if (Ops.empty())
2647 return;
2648
2649 // Do one pass to untie operands.
2650  SmallDenseMap<unsigned, unsigned> TiedOpIndices;
2651  for (const MachineOperand &MO : operands()) {
2652 if (MO.isReg() && MO.isTied()) {
2653 unsigned OpNo = getOperandNo(&MO);
2654 unsigned TiedTo = findTiedOperandIdx(OpNo);
2655 TiedOpIndices[OpNo] = TiedTo;
2656 untieRegOperand(OpNo);
2657 }
2658 }
2659
2660 unsigned OpIdx = getOperandNo(InsertBefore);
2661 unsigned NumOperands = getNumOperands();
2662 unsigned OpsToMove = NumOperands - OpIdx;
2663
2664  SmallVector<MachineOperand> MovingOps;
2665  MovingOps.reserve(OpsToMove);
2666
2667 for (unsigned I = 0; I < OpsToMove; ++I) {
2668 MovingOps.emplace_back(getOperand(OpIdx));
2669 removeOperand(OpIdx);
2670 }
2671 for (const MachineOperand &MO : Ops)
2672 addOperand(MO);
2673 for (const MachineOperand &OpMoved : MovingOps)
2674 addOperand(OpMoved);
2675
2676 // Re-tie operands.
2677 for (auto [Tie1, Tie2] : TiedOpIndices) {
2678 if (Tie1 >= OpIdx)
2679 Tie1 += Ops.size();
2680 if (Tie2 >= OpIdx)
2681 Tie2 += Ops.size();
2682 tieOperands(Tie1, Tie2);
2683 }
2684}
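// Illustrative usage sketch (added commentary, not part of the LLVM source):
// because insert() unties, shifts and re-ties tied operands itself, a caller
// can splice extra operands into the middle of an instruction without
// repairing ties by hand. OpIdx and NewOps are supplied by the caller.
// \code
//   void insertOperandsBefore(MachineInstr &MI, unsigned OpIdx,
//                             llvm::ArrayRef<MachineOperand> NewOps) {
//     MI.insert(&MI.getOperand(OpIdx), NewOps);
//   }
// \endcode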
2685
2686bool MachineInstr::mayFoldInlineAsmRegOp(unsigned OpId) const {
2687 assert(OpId && "expected non-zero operand id");
2688 assert(isInlineAsm() && "should only be used on inline asm");
2689
2690 if (!getOperand(OpId).isReg())
2691 return false;
2692
2693 const MachineOperand &MD = getOperand(OpId - 1);
2694 if (!MD.isImm())
2695 return false;
2696
2697 InlineAsm::Flag F(MD.getImm());
2698 if (F.isRegUseKind() || F.isRegDefKind() || F.isRegDefEarlyClobberKind())
2699 return F.getRegMayBeFolded();
2700 return false;
2701}
unsigned const MachineRegisterInfo * MRI
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
Analysis containing CSE Info
Definition: CSEInfo.cpp:27
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds.
Definition: Compiler.h:622
This file contains the declarations for the subclasses of Constant, which represent the different fla...
uint64_t Size
#define Check(C,...)
Hexagon Common GEP
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
Module.h This file contains the declarations for the Module class.
A set of register units.
Implement a low-level type suitable for MachineInstr level instruction selection.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
static DebugLoc getDebugLoc(MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
Return the first found DebugLoc that has a DILocation, given a range of instructions.
const unsigned TiedMax
static void moveOperands(MachineOperand *Dst, MachineOperand *Src, unsigned NumOps, MachineRegisterInfo *MRI)
Move NumOps MachineOperands from Src to Dst, with support for overlapping ranges.
static LocationSize getSpillSlotSize(const MMOList &Accesses, const MachineFrameInfo &MFI)
static const DIExpression * computeExprForSpill(const MachineInstr &MI, const SmallVectorImpl< const MachineOperand * > &SpilledOperands)
Compute the new DIExpression to use with a DBG_VALUE for a spill slot.
static bool MemOperandsHaveAlias(const MachineFrameInfo &MFI, BatchAAResults *AA, bool UseTBAA, const MachineMemOperand *MMOa, const MachineMemOperand *MMOb)
static void tryToGetTargetInfo(const MachineInstr &MI, const TargetRegisterInfo *&TRI, const MachineRegisterInfo *&MRI, const TargetIntrinsicInfo *&IntrinsicInfo, const TargetInstrInfo *&TII)
static const MachineFunction * getMFIfAvailable(const MachineInstr &MI)
static bool hasIdenticalMMOs(ArrayRef< MachineMemOperand * > LHS, ArrayRef< MachineMemOperand * > RHS)
Check to see if the MMOs pointed to by the two MemRefs arrays are identical.
unsigned const TargetRegisterInfo * TRI
This file provides utility analysis objects describing memory locations.
This file contains the declarations for metadata subclasses.
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static bool isReg(const MCInst &MI, unsigned OpNo)
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
bool isDead(const MachineInstr &MI, const MachineRegisterInfo &MRI)
This file contains some templates that are useful if you are working with the STL at all.
raw_pwrite_stream & OS
static cl::opt< bool > UseTBAA("use-tbaa-in-sched-mi", cl::Hidden, cl::init(true), cl::desc("Enable use of TBAA during MI DAG construction"))
This file implements the SmallBitVector class.
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:39
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition: VPlanSLP.cpp:191
Value * RHS
Value * LHS
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:168
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:163
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Definition: ArrayRef.h:198
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
bool isNoAlias(const MemoryLocation &LocA, const MemoryLocation &LocB)
ConstMIBundleOperands - Iterate over all operands in a const bundle of machine instructions.
DWARF expression.
bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static bool isEqualExpression(const DIExpression *FirstExpr, bool FirstIndirect, const DIExpression *SecondExpr, bool SecondIndirect)
Determines whether two debug values should produce equivalent DWARF expressions, using their DIExpres...
static DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
static DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
This class represents an Operation in the Expression.
bool print(raw_ostream &OS, DIDumpOptions DumpOpts, const DWARFExpression *Expr, DWARFUnit *U) const
A debug info location.
Definition: DebugLoc.h:33
bool hasTrivialDestructor() const
Check whether this has a trivial destructor.
Definition: DebugLoc.h:69
Diagnostic information for inline asm reporting.
Utility class for floating point operations which can have information about relaxed accuracy require...
Definition: Operator.h:205
Convenience struct for specifying and reasoning about fast-math flags.
Definition: FMF.h:20
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:369
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:933
bool hasStoreToStackSlot(const MachineInstr &MI, SmallVectorImpl< const MachineMemOperand * > &Accesses) const override
Check if the instruction or the bundle of instructions has store to stack slots.
bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, const MachineInstr &MIb) const override
bool hasLoadFromStackSlot(const MachineInstr &MI, SmallVectorImpl< const MachineMemOperand * > &Accesses) const override
Check if the instruction or the bundle of instructions has load from stack slots.
This instruction compares its operands according to the predicate given to the constructor.
static StringRef getMemConstraintName(ConstraintCode C)
Definition: InlineAsm.h:467
constexpr bool isValid() const
Definition: LowLevelType.h:145
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
A set of physical registers with utility functions to track liveness when walking backward/forward th...
Definition: LivePhysRegs.h:52
bool available(const MachineRegisterInfo &MRI, MCPhysReg Reg) const
Returns true if register Reg and no aliasing register is in the set.
A set of register units used to track register liveness.
Definition: LiveRegUnits.h:30
bool hasValue() const
static LocationSize precise(uint64_t Value)
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
bool isScalable() const
TypeSize getValue() const
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:237
ArrayRef< MCOperandInfo > operands() const
Definition: MCInstrDesc.h:239
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
Definition: MCInstrDesc.h:248
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
Definition: MCInstrDesc.h:219
ArrayRef< MCPhysReg > implicit_defs() const
Return a list of registers that are potentially written by any instance of this machine instruction.
Definition: MCInstrDesc.h:579
bool isPredicable() const
Return true if this instruction has a predicate operand that controls execution.
Definition: MCInstrDesc.h:338
unsigned short Opcode
Definition: MCInstrDesc.h:205
bool isVariadic() const
Return true if this instruction can have a variable number of operands.
Definition: MCInstrDesc.h:261
ArrayRef< MCPhysReg > implicit_uses() const
Return a list of registers that are potentially read by any instance of this machine instruction.
Definition: MCInstrDesc.h:565
MCRegAliasIterator enumerates all registers aliasing Reg.
bool isSubRegister(MCRegister RegA, MCRegister RegB) const
Returns true if RegB is a sub-register of RegA.
bool isSuperRegister(MCRegister RegA, MCRegister RegB) const
Returns true if RegB is a super-register of RegA.
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:33
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:41
Metadata node.
Definition: Metadata.h:1073
const MDOperand & getOperand(unsigned I) const
Definition: Metadata.h:1434
unsigned getNumOperands() const
Return number of MDNode operands.
Definition: Metadata.h:1440
bool isValid() const
isValid - Returns true until all the operands have been visited.
MachineInstr * remove_instr(MachineInstr *I)
Remove the possibly bundled instruction from the instruction list without deleting it.
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
instr_iterator erase_instr(MachineInstr *I)
Remove an instruction from the instruction list and delete it.
void printAsOperand(raw_ostream &OS, bool PrintType=true) const
MachineInstr * remove(MachineInstr *I)
Remove the unbundled instruction from the instruction list without deleting it.
void print(raw_ostream &OS, const SlotIndexes *=nullptr, bool IsStandalone=true) const
Instructions::iterator instr_iterator
Instructions::const_iterator const_instr_iterator
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
bool isSpillSlotObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a spill slot.
MachineInstr::ExtraInfo * createMIExtraInfo(ArrayRef< MachineMemOperand * > MMOs, MCSymbol *PreInstrSymbol=nullptr, MCSymbol *PostInstrSymbol=nullptr, MDNode *HeapAllocMarker=nullptr, MDNode *PCSections=nullptr, uint32_t CFIType=0, MDNode *MMRAs=nullptr)
Allocate and construct an extra info structure for a MachineInstr.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
void deallocateOperandArray(OperandCapacity Cap, MachineOperand *Array)
Dellocate an array of MachineOperands and recycle the memory.
MachineOperand * allocateOperandArray(OperandCapacity Cap)
Allocate an array of MachineOperands.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
void handleChangeDesc(MachineInstr &MI, const MCInstrDesc &TID)
Function & getFunction()
Return the LLVM function that this machine code represents.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addMetadata(const MDNode *MD) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Representation of each machine instruction.
Definition: MachineInstr.h:71
bool mayRaiseFPException() const
Return true if this instruction could possibly raise a floating-point exception.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:577
void setRegisterDefReadUndef(Register Reg, bool IsUndef=true)
Mark all subregister defs of register Reg with the undef flag.
static iterator_range< filter_iterator< Operand *, std::function< bool(Operand &Op)> > > getDebugOperandsForReg(Instruction *MI, Register Reg)
Returns a range of all of the operands that correspond to a debug use of Reg.
Definition: MachineInstr.h:618
bool isDebugValueList() const
void bundleWithPred()
Bundle this instruction with its predecessor.
bool isPosition() const
bool isTerminator(QueryType Type=AnyInBundle) const
Returns true if this instruction part of the terminator for a basic block.
Definition: MachineInstr.h:983
std::tuple< Register, LLT, Register, LLT, Register, LLT, Register, LLT, Register, LLT > getFirst5RegLLTs() const
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
void setCFIType(MachineFunction &MF, uint32_t Type)
Set the CFI type for the instruction.
MachineInstr * removeFromParent()
Unlink 'this' from the containing basic block, and return it without deleting it.
iterator_range< mop_iterator > debug_operands()
Returns a range over all operands that are used to determine the variable location for this DBG_VALUE...
Definition: MachineInstr.h:715
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:349
MDNode * getMMRAMetadata() const
Helper to extract mmra.op metadata.
Definition: MachineInstr.h:873
void bundleWithSucc()
Bundle this instruction with its successor.
uint32_t getCFIType() const
Helper to extract a CFI type hash if one has been added.
Definition: MachineInstr.h:882
bool isDebugLabel() const
void setPreInstrSymbol(MachineFunction &MF, MCSymbol *Symbol)
Set a symbol that will be emitted just prior to the instruction itself.
bool hasProperty(unsigned MCFlag, QueryType Type=AnyInBundle) const
Return true if the instruction (or in the case of a bundle, the instructions inside the bundle) has t...
Definition: MachineInstr.h:905
bool isDereferenceableInvariantLoad() const
Return true if this load instruction never traps and points to a memory location whose value doesn't ...
void setFlags(unsigned flags)
Definition: MachineInstr.h:412
QueryType
API for querying MachineInstr properties.
Definition: MachineInstr.h:894
void addImplicitDefUseOperands(MachineFunction &MF)
Add all implicit def and use operands to this instruction.
std::tuple< LLT, LLT, LLT, LLT, LLT > getFirst5LLTs() const
bool isCall(QueryType Type=AnyInBundle) const
Definition: MachineInstr.h:958
std::tuple< Register, LLT, Register, LLT, Register, LLT > getFirst3RegLLTs() const
bool getFlag(MIFlag Flag) const
Return whether an MI flag is set.
Definition: MachineInstr.h:399
uint32_t mergeFlagsWith(const MachineInstr &Other) const
Return the MIFlags which represent both MachineInstrs.
const MachineOperand & getDebugExpressionOp() const
Return the operand for the complex address expression referenced by this DBG_VALUE instruction.
std::pair< bool, bool > readsWritesVirtualRegister(Register Reg, SmallVectorImpl< unsigned > *Ops=nullptr) const
Return a pair of bools (reads, writes) indicating if this instruction reads or writes Reg.
Register isConstantValuePHI() const
If the specified instruction is a PHI that always merges together the same virtual register,...
bool isRegTiedToDefOperand(unsigned UseOpIdx, unsigned *DefOpIdx=nullptr) const
Return true if the use operand of the specified index is tied to a def operand.
bool allImplicitDefsAreDead() const
Return true if all the implicit defs of this instruction are dead.
void cloneMemRefs(MachineFunction &MF, const MachineInstr &MI)
Clone another MachineInstr's memory reference descriptor list and replace ours with it.
const TargetRegisterClass * getRegClassConstraintEffectForVReg(Register Reg, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ExploreBundle=false) const
Applies the constraints (def/use) implied by this MI on Reg to the given CurRC.
bool isSafeToMove(bool &SawStore) const
Return true if it is safe to move this instruction.
bool mayAlias(BatchAAResults *AA, const MachineInstr &Other, bool UseTBAA) const
Returns true if this instruction's memory access aliases the memory access of Other.
bool isBundle() const
bool isDebugInstr() const
unsigned getNumDebugOperands() const
Returns the total number of operands which are debug locations.
Definition: MachineInstr.h:583
unsigned getNumOperands() const
Retuns the total number of operands.
Definition: MachineInstr.h:580
void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
MachineInstr * removeFromBundle()
Unlink this instruction from its basic block and return it without deleting it.
void dumpr(const MachineRegisterInfo &MRI, unsigned MaxDepth=UINT_MAX) const
Print on dbgs() the current instruction and the instructions defining its operands and so on until we...
void copyIRFlags(const Instruction &I)
Copy all flags to MachineInst MIFlags.
bool isDebugValueLike() const
bool isInlineAsm() const
bool memoperands_empty() const
Return true if we don't have any memory operands which described the memory access done by this instr...
Definition: MachineInstr.h:820
mmo_iterator memoperands_end() const
Access to memory operands of the instruction.
Definition: MachineInstr.h:815
bool isDebugRef() const
void collectDebugValues(SmallVectorImpl< MachineInstr * > &DbgValues)
Scan instructions immediately following MI and collect any matching DBG_VALUEs.
std::optional< LocationSize > getRestoreSize(const TargetInstrInfo *TII) const
Return a valid size if the instruction is a restore instruction.
unsigned getOperandNo(const_mop_iterator I) const
Returns the number of the operand iterator I points to.
Definition: MachineInstr.h:783
unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
void setMemRefs(MachineFunction &MF, ArrayRef< MachineMemOperand * > MemRefs)
Assign this MachineInstr's memory reference descriptor list.
bool wouldBeTriviallyDead() const
Return true if this instruction would be trivially dead if all of its defined registers were dead.
bool isBundledWithPred() const
Return true if this instruction is part of a bundle, and it is not the first instruction in the bundl...
Definition: MachineInstr.h:480
std::tuple< LLT, LLT > getFirst2LLTs() const
std::optional< LocationSize > getFoldedSpillSize(const TargetInstrInfo *TII) const
Return a valid size if the instruction is a folded spill instruction.
void unbundleFromPred()
Break bundle above this instruction.
void copyImplicitOps(MachineFunction &MF, const MachineInstr &MI)
Copy implicit register operands from specified instruction to this instruction.
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
bool isStackAligningInlineAsm() const
void dropMemRefs(MachineFunction &MF)
Clear this MachineInstr's memory reference descriptor list.
int findRegisterUseOperandIdx(Register Reg, const TargetRegisterInfo *TRI, bool isKill=false) const
Returns the operand index that is a use of the specific register or -1 if it is not found.
MDNode * getPCSections() const
Helper to extract PCSections metadata target sections.
Definition: MachineInstr.h:863
bool isCFIInstruction() const
int findFirstPredOperandIdx() const
Find the index of the first operand in the operand list that is used to represent the predicate.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
Definition: MachineInstr.h:574
unsigned getBundleSize() const
Return the number of instructions inside the MI bundle, excluding the bundle header.
void cloneMergedMemRefs(MachineFunction &MF, ArrayRef< const MachineInstr * > MIs)
Clone the merge of multiple MachineInstrs' memory reference descriptors list and replace ours with it...
bool isCandidateForAdditionalCallInfo(QueryType Type=IgnoreBundle) const
Return true if this is a call instruction that may have an additional information associated with it.
std::tuple< Register, LLT, Register, LLT, Register, LLT, Register, LLT > getFirst4RegLLTs() const
std::tuple< Register, LLT, Register, LLT > getFirst2RegLLTs() const
unsigned getNumMemOperands() const
Return the number of memory operands.
Definition: MachineInstr.h:826
void clearFlag(MIFlag Flag)
clearFlag - Clear a MI flag.
Definition: MachineInstr.h:421
std::optional< LocationSize > getFoldedRestoreSize(const TargetInstrInfo *TII) const
Return a valid size if the instruction is a folded restore instruction.
const TargetRegisterClass * getRegClassConstraintEffect(unsigned OpIdx, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const
Applies the constraints (def/use) implied by the OpIdx operand to the given CurRC.
bool isOperandSubregIdx(unsigned OpIdx) const
Return true if operand OpIdx is a subregister index.
Definition: MachineInstr.h:664
InlineAsm::AsmDialect getInlineAsmDialect() const
bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore,...
bool isEquivalentDbgInstr(const MachineInstr &Other) const
Returns true if this instruction is a debug instruction that represents an identical debug value to O...
const DILabel * getDebugLabel() const
Return the debug label referenced by this DBG_LABEL instruction.
void untieRegOperand(unsigned OpIdx)
Break any tie involving OpIdx.
static uint32_t copyFlagsFromInstruction(const Instruction &I)
void insert(mop_iterator InsertBefore, ArrayRef< MachineOperand > Ops)
Inserts Ops BEFORE It. Can untie/retie tied operands.
void setDesc(const MCInstrDesc &TID)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one.
bool isJumpTableDebugInfo() const
unsigned getNumExplicitDefs() const
Returns the number of non-implicit definitions.
void eraseFromBundle()
Unlink 'this' from its basic block and delete it.
iterator_range< mop_iterator > operands()
Definition: MachineInstr.h:693
void setHeapAllocMarker(MachineFunction &MF, MDNode *MD)
Set a marker on instructions that denotes where we should create and emit heap alloc site labels.
const DILocalVariable * getDebugVariable() const
Return the debug variable referenced by this DBG_VALUE instruction.
bool hasComplexRegisterTies() const
Return true when an instruction has tied register that can't be determined by the instruction's descr...
LLT getTypeToPrint(unsigned OpIdx, SmallBitVector &PrintedTypes, const MachineRegisterInfo &MRI) const
Debugging supportDetermine the generic type to be printed (if needed) on uses and defs.
bool isLifetimeMarker() const
void substituteRegister(Register FromReg, Register ToReg, unsigned SubIdx, const TargetRegisterInfo &RegInfo)
Replace all occurrences of FromReg with ToReg:SubIdx, properly composing subreg indices where necessa...
unsigned findTiedOperandIdx(unsigned OpIdx) const
Given the index of a tied register operand, find the operand it is tied to.
void tieOperands(unsigned DefIdx, unsigned UseIdx)
Add a tie between the register operands at DefIdx and UseIdx.
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
Definition: MachineInstr.h:808
void cloneInstrSymbols(MachineFunction &MF, const MachineInstr &MI)
Clone another MachineInstr's pre- and post- instruction symbols and replace ours with it.
void changeDebugValuesDefReg(Register Reg)
Find all DBG_VALUEs that point to the register def in this instruction and point them to Reg instead.
bool isIdenticalTo(const MachineInstr &Other, MICheckType Check=CheckDefs) const
Return true if this instruction is identical to Other.
bool hasOrderedMemoryRef() const
Return true if this instruction may have an ordered or volatile memory reference, or if the informati...
void emitGenericError(const Twine &ErrMsg) const
const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
const DIExpression * getDebugExpression() const
Return the complex address expression referenced by this DBG_VALUE instruction.
ArrayRef< MachineMemOperand * > memoperands() const
Access to memory operands of the instruction.
Definition: MachineInstr.h:790
void print(raw_ostream &OS, bool IsStandalone=true, bool SkipOpers=false, bool SkipDebugLoc=false, bool AddNewLine=true, const TargetInstrInfo *TII=nullptr) const
Print this MI to OS.
bool isNonListDebugValue() const
bool isLoadFoldBarrier() const
Returns true if it is illegal to fold a load across this instruction.
bool mayStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly modify memory.
void setFlag(MIFlag Flag)
Set a MI flag.
Definition: MachineInstr.h:406
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:501
bool isDead(const MachineRegisterInfo &MRI, LiveRegUnits *LivePhysRegs=nullptr) const
Check whether an MI is dead.
std::tuple< LLT, LLT, LLT > getFirst3LLTs() const
const MachineOperand & getDebugVariableOp() const
Return the operand for the debug variable referenced by this DBG_VALUE instruction.
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
void setPhysRegsDeadExcept(ArrayRef< Register > UsedRegs, const TargetRegisterInfo &TRI)
Mark every physreg used by this instruction as dead except those in the UsedRegs list.
void removeOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with.
MCSymbol * getPreInstrSymbol() const
Helper to extract a pre-instruction symbol if one has been added.
Definition: MachineInstr.h:829
bool addRegisterKilled(Register IncomingReg, const TargetRegisterInfo *RegInfo, bool AddIfNotFound=false)
We have determined MI kills a register.
void setPostInstrSymbol(MachineFunction &MF, MCSymbol *Symbol)
Set a symbol that will be emitted just after the instruction itself.
bool isDebugValue() const
const MachineOperand & getDebugOffset() const
Return the operand containing the offset to be used if this DBG_VALUE instruction is indirect; will b...
Definition: MachineInstr.h:506
MachineOperand & getDebugOperand(unsigned Index)
Definition: MachineInstr.h:596
std::optional< LocationSize > getSpillSize(const TargetInstrInfo *TII) const
Return a valid size if the instruction is a spill instruction.
iterator_range< mop_iterator > implicit_operands()
Definition: MachineInstr.h:707
bool isBundledWithSucc() const
Return true if this instruction is part of a bundle, and it is not the last instruction in the bundle...
Definition: MachineInstr.h:484
void addRegisterDefined(Register Reg, const TargetRegisterInfo *RegInfo=nullptr)
We have determined MI defines a register.
MDNode * getHeapAllocMarker() const
Helper to extract a heap alloc marker if one has been added.
Definition: MachineInstr.h:853
unsigned getDebugInstrNum()
Fetch the instruction number of this MachineInstr.
std::tuple< LLT, LLT, LLT, LLT > getFirst4LLTs() const
bool isPHI() const
void clearRegisterDeads(Register Reg)
Clear all dead flags on operands defining register Reg.
void clearRegisterKills(Register Reg, const TargetRegisterInfo *RegInfo)
Clear all kill flags affecting Reg.
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:587
void emitInlineAsmError(const Twine &ErrMsg) const
Emit an error referring to the source location of this instruction.
uint32_t getFlags() const
Return the MI flags bitvector.
Definition: MachineInstr.h:394
bool isPseudoProbe() const
bool hasRegisterImplicitUseOperand(Register Reg) const
Returns true if the MachineInstr has an implicit-use operand of exactly the given register (not consi...
bool shouldUpdateAdditionalCallInfo() const
Return true if copying, moving, or erasing this instruction requires updating additional call info (s...
MCSymbol * getPostInstrSymbol() const
Helper to extract a post-instruction symbol if one has been added.
Definition: MachineInstr.h:841
void unbundleFromSucc()
Break bundle below this instruction.
iterator_range< filtered_mop_iterator > all_defs()
Returns an iterator range over all operands that are (explicit or implicit) register defs.
Definition: MachineInstr.h:764
void clearKillInfo()
Clears kill flags on all operands.
bool isDebugEntryValue() const
A DBG_VALUE is an entry value iff its debug expression contains the DW_OP_LLVM_entry_value operation.
bool isIndirectDebugValue() const
A DBG_VALUE is indirect iff the location operand is a register and the offset operand is an immediate...
unsigned getNumDefs() const
Returns the total number of definitions.
Definition: MachineInstr.h:646
void setPCSections(MachineFunction &MF, MDNode *MD)
bool isKill() const
const MDNode * getLocCookieMD() const
For inline asm, get the !srcloc metadata node if we have it, and decode the loc cookie from it.
int findRegisterDefOperandIdx(Register Reg, const TargetRegisterInfo *TRI, bool isDead=false, bool Overlap=false) const
Returns the operand index that is a def of the specified register or -1 if it is not found.
bool isFakeUse() const
bool isVariadic(QueryType Type=IgnoreBundle) const
Return true if this instruction can have a variable number of operands.
Definition: MachineInstr.h:926
int findInlineAsmFlagIdx(unsigned OpIdx, unsigned *GroupNo=nullptr) const
Find the index of the flag word operand that corresponds to operand OpIdx on an inline asm instructio...
bool allDefsAreDead() const
Return true if all the defs of this instruction are dead.
void setMMRAMetadata(MachineFunction &MF, MDNode *MMRAs)
const TargetRegisterClass * getRegClassConstraint(unsigned OpIdx, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const
Compute the static register class constraint for operand OpIdx.
void moveBefore(MachineInstr *MovePos)
Move the instruction before MovePos.
MachineOperand * findRegisterDefOperand(Register Reg, const TargetRegisterInfo *TRI, bool isDead=false, bool Overlap=false)
Wrapper for findRegisterDefOperandIdx, it returns a pointer to the MachineOperand rather than an inde...
void addMemOperand(MachineFunction &MF, MachineMemOperand *MO)
Add a MachineMemOperand to the machine instruction.
bool addRegisterDead(Register Reg, const TargetRegisterInfo *RegInfo, bool AddIfNotFound=false)
We have determined MI defined a register without a use.
bool mayFoldInlineAsmRegOp(unsigned OpId) const
Returns true if the register operand can be folded with a load or store into a frame index.
A description of a memory reference used in the backend.
LocationSize getSize() const
Return the size in bytes of the memory reference.
const PseudoSourceValue * getPseudoValue() const
bool isUnordered() const
Returns true if this memory operation doesn't have any ordering constraints other than normal aliasing and volatility.
AAMDNodes getAAInfo() const
Return the AA tags for the memory reference.
const Value * getValue() const
Return the base address of the memory access.
int64_t getOffset() const
For normal values, this is a byte offset added to the base address.
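A minimal sketch (not part of this file; the helper name is hypothetical) of how the MachineMemOperand accessors above are typically consulted when deciding whether an instruction's memory accesses are simple:

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineMemOperand.h"

using namespace llvm;

// Hypothetical helper: true when every memory operand is unordered and has a
// known size.
static bool hasOnlySimpleKnownSizeAccesses(const MachineInstr &MI) {
  for (const MachineMemOperand *MMO : MI.memoperands()) {
    if (!MMO->isUnordered())        // an atomic/volatile ordering is present
      return false;
    if (!MMO->getSize().hasValue()) // LocationSize may be unknown
      return false;
  }
  return true;
}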
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
void substVirtReg(Register Reg, unsigned SubIdx, const TargetRegisterInfo &)
substVirtReg - Substitute the current register with the virtual subregister Reg:SubReg.
static void printSubRegIdx(raw_ostream &OS, uint64_t Index, const TargetRegisterInfo *TRI)
Print a subreg index operand.
int64_t getImm() const
bool isImplicit() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool isRegMask() const
isRegMask - Tests if this is a MO_RegisterMask operand.
const MDNode * getMetadata() const
void setIsDead(bool Val=true)
void setMetadata(const MDNode *MD)
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
void ChangeToImmediate(int64_t ImmVal, unsigned TargetFlags=0)
ChangeToImmediate - Replace this operand with a new immediate operand of the specified value.
bool isMetadata() const
isMetadata - Tests if this is a MO_Metadata operand.
void setIsKill(bool Val=true)
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
void substPhysReg(MCRegister Reg, const TargetRegisterInfo &)
substPhysReg - Substitute the current register with the physical register Reg, taking any existing SubReg into account.
void setIsEarlyClobber(bool Val=true)
void setIsUndef(bool Val=true)
void setIsDebug(bool Val=true)
Register getReg() const
getReg - Returns the register number.
bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags (isKill, isUndef and isDead).
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
void print(raw_ostream &os, const TargetRegisterInfo *TRI=nullptr, const TargetIntrinsicInfo *IntrinsicInfo=nullptr) const
Print the MachineOperand to os.
static void printSymbol(raw_ostream &OS, MCSymbol &Sym)
Print a MCSymbol as an operand.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
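A minimal sketch (not part of this file) that exercises the MachineOperand accessors listed above; the helper name and the rewrite it performs are illustrative only:

#include "llvm/CodeGen/MachineOperand.h"

using namespace llvm;

// Hypothetical helper: rewrite a plain register use into an immediate operand.
static void rewriteUseToImmediate(MachineOperand &MO, int64_t ImmVal) {
  if (!MO.isReg() || MO.isDef())
    return; // only register uses, not defs or other operand kinds
  // ChangeToImmediate replaces the operand in place, dropping register flags.
  MO.ChangeToImmediate(ImmVal);
}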
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
Representation for a specific memory location.
void printAsOperand(raw_ostream &OS, const Module *M=nullptr) const
Print as operand.
Definition: AsmWriter.cpp:5250
Manage lifetime of a slot tracker for printing IR.
void incorporateFunction(const Function &F)
Incorporate the given function.
Definition: AsmWriter.cpp:904
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
Definition: Operator.h:77
An or instruction, which can be marked as "disjoint", indicating that the inputs don't have a 1 in the same bit position.
Definition: InstrTypes.h:400
A udiv or sdiv instruction, which can be marked as "exact", indicating that no bits are destroyed.
Definition: Operator.h:155
Instruction that can have a nneg flag (zext/uitofp).
Definition: InstrTypes.h:636
Special value supplied for machine level alias analysis.
virtual bool mayAlias(const MachineFrameInfo *) const
Return true if the memory pointed to by this PseudoSourceValue can ever alias an LLVM IR Value.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition: Register.h:91
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition: Register.h:95
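A minimal sketch (not part of this file) of the Register namespace checks above; describe() is a hypothetical name:

#include "llvm/CodeGen/Register.h"

using namespace llvm;

// Hypothetical helper: classify a Register value by its namespace.
static const char *describe(Register Reg) {
  if (!Reg.isValid())
    return "no register";
  return Reg.isVirtual() ? "virtual register" : "physical register";
}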
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is small.
SmallBitVector & set()
A templated base class for SmallPtrSet which provides the typesafe interface that is common across all small sizes.
Definition: SmallPtrSet.h:363
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:384
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:519
bool empty() const
Definition: SmallVector.h:81
size_t size() const
Definition: SmallVector.h:78
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
Definition: SmallVector.h:573
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:937
void reserve(size_type N)
Definition: SmallVector.h:663
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:683
void push_back(const T &Elt)
Definition: SmallVector.h:413
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1196
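A minimal sketch (not part of this file; the function name is hypothetical) combining the SmallVector and SmallPtrSet operations listed above to de-duplicate a pointer sequence while preserving order:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"

using namespace llvm;

// Hypothetical helper: keep only the first occurrence of each pointer.
static SmallVector<const int *, 8> uniquePointers(ArrayRef<const int *> In) {
  SmallPtrSet<const int *, 8> Seen;
  SmallVector<const int *, 8> Out;
  Out.reserve(In.size());
  for (const int *P : In)
    if (Seen.insert(P).second) // insert() reports whether P was newly added
      Out.push_back(P);
  return Out;
}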
static unsigned getNextMetaArgIdx(const MachineInstr *MI, unsigned CurIdx)
Get index of next meta operand.
Definition: StackMaps.cpp:170
MI-level Statepoint operands.
Definition: StackMaps.h:158
int getFirstGCPtrIdx()
Get index of the first GC pointer operand, or -1 if there are none.
Definition: StackMaps.cpp:124
TargetInstrInfo - Interface to description of machine instruction set.
TargetIntrinsicInfo - Interface to the description of a target's intrinsic functions.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDesc objects that represent all of the machine registers that the target has.
MCRegister getSubReg(MCRegister Reg, unsigned Idx) const
Returns the physical register number of sub-register Idx for physical register Reg.
bool regsOverlap(Register RegA, Register RegB) const
Returns true if the two registers are equal or alias each other.
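A minimal sketch (not part of this file), assuming TRI, Reg, SubIdx, and Other come from the caller, of how getSubReg and regsOverlap combine; the helper name is hypothetical:

#include "llvm/CodeGen/TargetRegisterInfo.h"

using namespace llvm;

// Hypothetical helper: does Reg's sub-register SubIdx exist and overlap Other?
static bool subRegOverlaps(const TargetRegisterInfo &TRI, MCRegister Reg,
                           unsigned SubIdx, Register Other) {
  MCRegister Sub = TRI.getSubReg(Reg, SubIdx);
  return Sub.isValid() && TRI.regsOverlap(Sub, Other);
}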
virtual const TargetInstrInfo * getInstrInfo() const
This class represents a truncation of integer types.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary values as strings.
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
LLVM Value Representation.
Definition: Value.h:74
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition: TypeSize.h:168
formatted_raw_ostream & PadToColumn(unsigned NewCol)
PadToColumn - Align the output to some column number.
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
MCInstrDesc const & getDesc(MCInstrInfo const &MCII, MCInst const &MCI)
@ UnmodeledSideEffects
Definition: MCInstrDesc.h:173
constexpr double e
Definition: MathExtras.h:48
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:329
hash_code hash_value(const FixedPointSemantics &Val)
Definition: APFixedPoint.h:136
formatted_raw_ostream & fdbgs()
fdbgs() - This returns a reference to a formatted_raw_ostream for debug output.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
void updateDbgValueForSpill(MachineInstr &Orig, int FrameIndex, Register Reg)
Update a DBG_VALUE whose value has been spilled to FrameIndex.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1746
iterator_range< pointee_iterator< WrappedIteratorT > > make_pointee_range(RangeT &&Range)
Definition: iterator.h:336
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
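A minimal sketch (not part of this file) of the conventional use of dbgs(): wrap the stream in LLVM_DEBUG so the output only appears when the debug type is enabled. The DEBUG_TYPE string and helper name are hypothetical:

#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "operand-count" // hypothetical debug type

// Hypothetical helper: emit a debug-only message.
static void reportOperandCount(unsigned NumOps) {
  LLVM_DEBUG(dbgs() << "instruction has " << NumOps << " operands\n");
}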
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1753
@ Other
Any other memory.
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1873
MachineInstr * buildDbgValueForSpill(MachineBasicBlock &BB, MachineBasicBlock::iterator I, const MachineInstr &Orig, int FrameIndex, Register SpillReg)
Clone a DBG_VALUE whose value has been spilled to FrameIndex.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1903
hash_code hash_combine_range(InputIteratorT first, InputIteratorT last)
Compute a hash_code for a sequence of values.
Definition: Hashing.h:468
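A minimal sketch (not part of this file) using the range helpers listed above (any_of, drop_begin) over a MachineInstr's operand list; the predicate and helper name are hypothetical:

#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineInstr.h"

using namespace llvm;

// Hypothetical helper: is Reg read by any operand after the first one?
static bool usesRegAfterFirstOperand(const MachineInstr &MI, Register Reg) {
  return any_of(drop_begin(MI.operands()), [Reg](const MachineOperand &MO) {
    return MO.isReg() && !MO.isDef() && MO.getReg() == Reg;
  });
}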
Implement std::hash so that hash_code can be used in STL containers.
Definition: BitVector.h:858
#define N
A collection of metadata nodes that might be associated with a memory access used by the alias-analysis infrastructure.
Definition: Metadata.h:764
static unsigned getHashValue(const MachineInstr *const &MI)