//===-- VEInstrInfo.cpp - VE Instruction Information ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the VE implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "VEInstrInfo.h"
#include "VE.h"
#include "VEMachineFunctionInfo.h"
#include "VESubtarget.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"

#define DEBUG_TYPE "ve-instr-info"

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#include "VEGenInstrInfo.inc"

// Pin the vtable to this file.
void VEInstrInfo::anchor() {}

VEInstrInfo::VEInstrInfo(VESubtarget &ST)
    : VEGenInstrInfo(VE::ADJCALLSTACKDOWN, VE::ADJCALLSTACKUP), RI() {}

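// Integer condition codes are ordered before VECC::CC_AF in the VECC enum;
// condition codes at or above CC_AF are treated as floating-point conditions.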
static bool IsIntegerCC(unsigned CC) { return (CC < VECC::CC_AF); }

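// Return the condition code that tests the logical negation of CC, e.g.
// "integer greater" becomes "integer less than or equal" and "greater"
// becomes "less than or equal, or NaN".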
static VECC::CondCode GetOppositeBranchCondition(VECC::CondCode CC) {
  switch (CC) {
  case VECC::CC_IG:
    return VECC::CC_ILE;
  case VECC::CC_IL:
    return VECC::CC_IGE;
  case VECC::CC_INE:
    return VECC::CC_IEQ;
  case VECC::CC_IEQ:
    return VECC::CC_INE;
  case VECC::CC_IGE:
    return VECC::CC_IL;
  case VECC::CC_ILE:
    return VECC::CC_IG;
  case VECC::CC_AF:
    return VECC::CC_AT;
  case VECC::CC_G:
    return VECC::CC_LENAN;
  case VECC::CC_L:
    return VECC::CC_GENAN;
  case VECC::CC_NE:
    return VECC::CC_EQNAN;
  case VECC::CC_EQ:
    return VECC::CC_NENAN;
  case VECC::CC_GE:
    return VECC::CC_LNAN;
  case VECC::CC_LE:
    return VECC::CC_GNAN;
  case VECC::CC_NUM:
    return VECC::CC_NAN;
  case VECC::CC_NAN:
    return VECC::CC_NUM;
  case VECC::CC_GNAN:
    return VECC::CC_LE;
  case VECC::CC_LNAN:
    return VECC::CC_GE;
  case VECC::CC_NENAN:
    return VECC::CC_EQ;
  case VECC::CC_EQNAN:
    return VECC::CC_NE;
  case VECC::CC_GENAN:
    return VECC::CC_L;
  case VECC::CC_LENAN:
    return VECC::CC_G;
  case VECC::CC_AT:
    return VECC::CC_AF;
  case VECC::UNKNOWN:
    return VECC::UNKNOWN;
  }
  llvm_unreachable("Invalid cond code");
}

// Treat a "branch relative long always" instruction as an unconditional
// branch.  For example, br.l.t and br.l.
static bool isUncondBranchOpcode(int Opc) {
  using namespace llvm::VE;

#define BRKIND(NAME) (Opc == NAME##a || Opc == NAME##a_nt || Opc == NAME##a_t)
  // VE has other "branch relative always" instructions for word/double/float,
  // but we use only long branches in our lowering, so check that here.
  assert(!BRKIND(BRCFW) && !BRKIND(BRCFD) && !BRKIND(BRCFS) &&
         "Branch relative word/double/float always instructions should not be "
         "used!");
  return BRKIND(BRCFL);
#undef BRKIND
}

// Treat "branch relative conditional" instructions as conditional branches.
// For example, brgt.l.t and brle.s.nt.
static bool isCondBranchOpcode(int Opc) {
  using namespace llvm::VE;

#define BRKIND(NAME)                                                           \
  (Opc == NAME##rr || Opc == NAME##rr_nt || Opc == NAME##rr_t ||               \
   Opc == NAME##ir || Opc == NAME##ir_nt || Opc == NAME##ir_t)
  return BRKIND(BRCFL) || BRKIND(BRCFW) || BRKIND(BRCFD) || BRKIND(BRCFS);
#undef BRKIND
}

// Treat "branch long always" instructions as indirect branches.
// For example, b.l.t and b.l.
static bool isIndirectBranchOpcode(int Opc) {
  using namespace llvm::VE;

#define BRKIND(NAME)                                                           \
  (Opc == NAME##ari || Opc == NAME##ari_nt || Opc == NAME##ari_t)
  // VE has other "branch always" instructions for word/double/float, but
  // we use only long branches in our lowering, so check that here.
  assert(!BRKIND(BCFW) && !BRKIND(BCFD) && !BRKIND(BCFS) &&
         "Branch word/double/float always instructions should not be used!");
  return BRKIND(BCFL);
#undef BRKIND
}

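// A VE conditional branch has the form (BRCF<kind> CC, LHS, RHS, TargetMBB).
// Record the first three operands as the branch condition and return the
// target block through Target.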
static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
                            SmallVectorImpl<MachineOperand> &Cond) {
  Cond.push_back(MachineOperand::CreateImm(LastInst->getOperand(0).getImm()));
  Cond.push_back(LastInst->getOperand(1));
  Cond.push_back(LastInst->getOperand(2));
  Target = LastInst->getOperand(3).getMBB();
}

bool VEInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                MachineBasicBlock *&TBB,
                                MachineBasicBlock *&FBB,
                                SmallVectorImpl<MachineOperand> &Cond,
                                bool AllowModify) const {
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return false;

  if (!isUnpredicatedTerminator(*I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = &*I;
  unsigned LastOpc = LastInst->getOpcode();

  // If there is only one terminator instruction, process it.
  if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
    if (isUncondBranchOpcode(LastOpc)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(LastOpc)) {
      // Block ends with fall-through condbranch.
      parseCondBranch(LastInst, TBB, Cond);
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = &*I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If AllowModify is true and the block ends with two or more unconditional
  // branches, delete all but the first unconditional branch.
  if (AllowModify && isUncondBranchOpcode(LastOpc)) {
    while (isUncondBranchOpcode(SecondLastOpc)) {
      LastInst->eraseFromParent();
      LastInst = SecondLastInst;
      LastOpc = LastInst->getOpcode();
      if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
        // Return now; the only terminator is an unconditional branch.
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      }
      SecondLastInst = &*I;
      SecondLastOpc = SecondLastInst->getOpcode();
    }
  }

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(*--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    parseCondBranch(SecondLastInst, TBB, Cond);
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it.  The second
  // one is not executed.
  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    return false;
  }

  // ...likewise if it ends with an indirect branch followed by an
  // unconditional branch.
  if (isIndirectBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}

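// Insert an unconditional branch to TBB, or a conditional branch on Cond to
// TBB followed by an optional unconditional branch to FBB, and return the
// number of instructions inserted.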
unsigned VEInstrInfo::insertBranch(MachineBasicBlock &MBB,
                                   MachineBasicBlock *TBB,
                                   MachineBasicBlock *FBB,
                                   ArrayRef<MachineOperand> Cond,
                                   const DebugLoc &DL, int *BytesAdded) const {
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 3 || Cond.size() == 0) &&
         "VE branch conditions should have three components!");
  assert(!BytesAdded && "code size not handled");
  if (Cond.empty()) {
    // Unconditional branch.
    assert(!FBB && "Unconditional branch with multiple successors!");
    BuildMI(&MBB, DL, get(VE::BRCFLa_t))
        .addMBB(TBB);
    return 1;
  }

  // Conditional branch
  //   (BRCFir CC sy sz addr)
  assert(Cond[0].isImm() && Cond[2].isReg() && "not implemented");

  unsigned opc[2];
  MachineFunction *MF = MBB.getParent();
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  const MachineRegisterInfo &MRI = MF->getRegInfo();
  Register Reg = Cond[2].getReg();
  if (IsIntegerCC(Cond[0].getImm())) {
    if (TRI->getRegSizeInBits(Reg, MRI) == 32) {
      opc[0] = VE::BRCFWir;
      opc[1] = VE::BRCFWrr;
    } else {
      opc[0] = VE::BRCFLir;
      opc[1] = VE::BRCFLrr;
    }
  } else {
    if (TRI->getRegSizeInBits(Reg, MRI) == 32) {
      opc[0] = VE::BRCFSir;
      opc[1] = VE::BRCFSrr;
    } else {
      opc[0] = VE::BRCFDir;
      opc[1] = VE::BRCFDrr;
    }
  }
  if (Cond[1].isImm()) {
    BuildMI(&MBB, DL, get(opc[0]))
        .add(Cond[0]) // condition code
        .add(Cond[1]) // lhs
        .add(Cond[2]) // rhs
        .addMBB(TBB);
  } else {
    BuildMI(&MBB, DL, get(opc[1]))
        .add(Cond[0])
        .add(Cond[1])
        .add(Cond[2])
        .addMBB(TBB);
  }

  if (!FBB)
    return 1;

  BuildMI(&MBB, DL, get(VE::BRCFLa_t))
      .addMBB(FBB);
  return 2;
}

unsigned VEInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                   int *BytesRemoved) const {
  assert(!BytesRemoved && "code size not handled");

  MachineBasicBlock::iterator I = MBB.end();
  unsigned Count = 0;
  while (I != MBB.begin()) {
    --I;

    if (I->isDebugValue())
      continue;

    if (!isUncondBranchOpcode(I->getOpcode()) &&
        !isCondBranchOpcode(I->getOpcode()))
      break; // Not a branch.

    I->eraseFromParent();
    I = MBB.end();
    ++Count;
  }
  return Count;
}

bool VEInstrInfo::reverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  VECC::CondCode CC = static_cast<VECC::CondCode>(Cond[0].getImm());
  Cond[0].setImm(GetOppositeBranchCondition(CC));
  return false;
}

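// Return true if Reg belongs to one of the scalar register classes (I32, I64,
// or F32) whose registers are all aliases of the 64-bit SX registers.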
static bool IsAliasOfSX(Register Reg) {
  return VE::I32RegClass.contains(Reg) || VE::I64RegClass.contains(Reg) ||
         VE::F32RegClass.contains(Reg);
}

static void copyPhysSubRegs(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator I, const DebugLoc &DL,
                            MCRegister DestReg, MCRegister SrcReg, bool KillSrc,
                            const MCInstrDesc &MCID, unsigned int NumSubRegs,
                            const unsigned *SubRegIdx,
                            const TargetRegisterInfo *TRI) {
  MachineInstr *MovMI = nullptr;

  for (unsigned Idx = 0; Idx != NumSubRegs; ++Idx) {
    Register SubDest = TRI->getSubReg(DestReg, SubRegIdx[Idx]);
    Register SubSrc = TRI->getSubReg(SrcReg, SubRegIdx[Idx]);
    assert(SubDest && SubSrc && "Bad sub-register");

    if (MCID.getOpcode() == VE::ORri) {
      // Generate an "ORri dest, src, 0" instruction.
      MachineInstrBuilder MIB =
          BuildMI(MBB, I, DL, MCID, SubDest).addReg(SubSrc).addImm(0);
      MovMI = MIB.getInstr();
    } else if (MCID.getOpcode() == VE::ANDMmm) {
      // Generate an "ANDM dest, vm0, src" instruction.
      MachineInstrBuilder MIB =
          BuildMI(MBB, I, DL, MCID, SubDest).addReg(VE::VM0).addReg(SubSrc);
      MovMI = MIB.getInstr();
    } else {
      llvm_unreachable("Unexpected reg-to-reg copy instruction");
    }
  }
  // Add implicit super-register defs and kills to the last MovMI.
  MovMI->addRegisterDefined(DestReg, TRI);
  if (KillSrc)
    MovMI->addRegisterKilled(SrcReg, TRI, true);
}

void VEInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I, const DebugLoc &DL,
                              MCRegister DestReg, MCRegister SrcReg,
                              bool KillSrc, bool RenamableDest,
                              bool RenamableSrc) const {

  if (IsAliasOfSX(SrcReg) && IsAliasOfSX(DestReg)) {
    BuildMI(MBB, I, DL, get(VE::ORri), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .addImm(0);
  } else if (VE::V64RegClass.contains(DestReg, SrcReg)) {
    // Generate the following instructions:
    //   %sw16 = LEA32zii 256
    //   VORmvl %dest, (0)1, %src, %sw16
    // TODO: reuse a register if vl is already assigned to a register
    // FIXME: it would be better to scavenge a register here instead of
    //        reserving SX16 all of the time.
    const TargetRegisterInfo *TRI = &getRegisterInfo();
    Register TmpReg = VE::SX16;
    Register SubTmp = TRI->getSubReg(TmpReg, VE::sub_i32);
    BuildMI(MBB, I, DL, get(VE::LEAzii), TmpReg)
        .addImm(0)
        .addImm(0)
        .addImm(256);
    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(VE::VORmvl), DestReg)
                                  .addImm(M1(0)) // Represent (0)1.
                                  .addReg(SrcReg, getKillRegState(KillSrc))
                                  .addReg(SubTmp, getKillRegState(true));
    MIB.getInstr()->addRegisterKilled(TmpReg, TRI, true);
  } else if (VE::VMRegClass.contains(DestReg, SrcReg)) {
    BuildMI(MBB, I, DL, get(VE::ANDMmm), DestReg)
        .addReg(VE::VM0)
        .addReg(SrcReg, getKillRegState(KillSrc));
  } else if (VE::VM512RegClass.contains(DestReg, SrcReg)) {
    // Use two instructions.
    const unsigned SubRegIdx[] = {VE::sub_vm_even, VE::sub_vm_odd};
    unsigned int NumSubRegs = 2;
    copyPhysSubRegs(MBB, I, DL, DestReg, SrcReg, KillSrc, get(VE::ANDMmm),
                    NumSubRegs, SubRegIdx, &getRegisterInfo());
  } else if (VE::F128RegClass.contains(DestReg, SrcReg)) {
    // Use two instructions.
    const unsigned SubRegIdx[] = {VE::sub_even, VE::sub_odd};
    unsigned int NumSubRegs = 2;
    copyPhysSubRegs(MBB, I, DL, DestReg, SrcReg, KillSrc, get(VE::ORri),
                    NumSubRegs, SubRegIdx, &getRegisterInfo());
  } else {
    const TargetRegisterInfo *TRI = &getRegisterInfo();
    dbgs() << "Impossible reg-to-reg copy from " << printReg(SrcReg, TRI)
           << " to " << printReg(DestReg, TRI) << "\n";
    llvm_unreachable("Impossible reg-to-reg copy");
  }
}

/// isLoadFromStackSlot - If the specified machine instruction is a direct
/// load from a stack slot, return the virtual or physical register number of
/// the destination along with the FrameIndex of the loaded stack slot.  If
/// not, return 0.  This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
Register VEInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                          int &FrameIndex) const {
  if (MI.getOpcode() == VE::LDrii ||    // I64
      MI.getOpcode() == VE::LDLSXrii || // I32
      MI.getOpcode() == VE::LDUrii ||   // F32
      MI.getOpcode() == VE::LDQrii ||   // F128 (pseudo)
      MI.getOpcode() == VE::LDVMrii ||  // VM (pseudo)
      MI.getOpcode() == VE::LDVM512rii  // VM512 (pseudo)
  ) {
    if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0 && MI.getOperand(3).isImm() &&
        MI.getOperand(3).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
    }
  }
  return 0;
}

/// isStoreToStackSlot - If the specified machine instruction is a direct
/// store to a stack slot, return the virtual or physical register number of
/// the source reg along with the FrameIndex of the stored stack slot.  If
/// not, return 0.  This predicate must return 0 if the instruction has
/// any side effects other than storing to the stack slot.
Register VEInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                         int &FrameIndex) const {
  if (MI.getOpcode() == VE::STrii ||    // I64
      MI.getOpcode() == VE::STLrii ||   // I32
      MI.getOpcode() == VE::STUrii ||   // F32
      MI.getOpcode() == VE::STQrii ||   // F128 (pseudo)
      MI.getOpcode() == VE::STVMrii ||  // VM (pseudo)
      MI.getOpcode() == VE::STVM512rii  // VM512 (pseudo)
  ) {
    if (MI.getOperand(0).isFI() && MI.getOperand(1).isImm() &&
        MI.getOperand(1).getImm() == 0 && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0) {
      FrameIndex = MI.getOperand(0).getIndex();
      return MI.getOperand(3).getReg();
    }
  }
  return 0;
}

void VEInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator I,
                                      Register SrcReg, bool isKill, int FI,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI,
                                      Register VReg,
                                      MachineInstr::MIFlag Flags) const {
  DebugLoc DL;
  if (I != MBB.end())
    DL = I->getDebugLoc();

  MachineFunction *MF = MBB.getParent();
  const MachineFrameInfo &MFI = MF->getFrameInfo();
  MachineMemOperand *MMO = MF->getMachineMemOperand(
      MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
      MFI.getObjectSize(FI), MFI.getObjectAlign(FI));

  // On the order of operands here: think "[FrameIdx + 0] = SrcReg".
  if (RC == &VE::I64RegClass) {
    BuildMI(MBB, I, DL, get(VE::STrii))
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addReg(SrcReg, getKillRegState(isKill))
        .addMemOperand(MMO);
  } else if (RC == &VE::I32RegClass) {
    BuildMI(MBB, I, DL, get(VE::STLrii))
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addReg(SrcReg, getKillRegState(isKill))
        .addMemOperand(MMO);
  } else if (RC == &VE::F32RegClass) {
    BuildMI(MBB, I, DL, get(VE::STUrii))
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addReg(SrcReg, getKillRegState(isKill))
        .addMemOperand(MMO);
  } else if (VE::F128RegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(VE::STQrii))
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addReg(SrcReg, getKillRegState(isKill))
        .addMemOperand(MMO);
  } else if (RC == &VE::VMRegClass) {
    BuildMI(MBB, I, DL, get(VE::STVMrii))
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addReg(SrcReg, getKillRegState(isKill))
        .addMemOperand(MMO);
  } else if (VE::VM512RegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(VE::STVM512rii))
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addReg(SrcReg, getKillRegState(isKill))
        .addMemOperand(MMO);
  } else
    report_fatal_error("Can't store this register to stack slot");
}

void VEInstrInfo::loadRegFromStackSlot(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator I, Register DestReg,
    int FI, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
    Register VReg, MachineInstr::MIFlag Flags) const {
  DebugLoc DL;
  if (I != MBB.end())
    DL = I->getDebugLoc();

  MachineFunction *MF = MBB.getParent();
  const MachineFrameInfo &MFI = MF->getFrameInfo();
  MachineMemOperand *MMO = MF->getMachineMemOperand(
      MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
      MFI.getObjectSize(FI), MFI.getObjectAlign(FI));

  if (RC == &VE::I64RegClass) {
    BuildMI(MBB, I, DL, get(VE::LDrii), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addMemOperand(MMO);
  } else if (RC == &VE::I32RegClass) {
    BuildMI(MBB, I, DL, get(VE::LDLSXrii), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addMemOperand(MMO);
  } else if (RC == &VE::F32RegClass) {
    BuildMI(MBB, I, DL, get(VE::LDUrii), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addMemOperand(MMO);
  } else if (VE::F128RegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(VE::LDQrii), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addMemOperand(MMO);
  } else if (RC == &VE::VMRegClass) {
    BuildMI(MBB, I, DL, get(VE::LDVMrii), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addMemOperand(MMO);
  } else if (VE::VM512RegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(VE::LDVM512rii), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addImm(0)
        .addMemOperand(MMO);
  } else
    report_fatal_error("Can't load this register from stack slot");
}

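// Fold a small-immediate-producing DefMI (ORim or LEAzii) into UseMI by
// rewriting UseMI to an immediate form of the same operation, and delete
// DefMI when the folded register has no other users.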
bool VEInstrInfo::foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
                                Register Reg, MachineRegisterInfo *MRI) const {
  LLVM_DEBUG(dbgs() << "foldImmediate\n");

  LLVM_DEBUG(dbgs() << "checking DefMI\n");
  int64_t ImmVal;
  switch (DefMI.getOpcode()) {
  default:
    return false;
  case VE::ORim:
    // General move of a small immediate on VE.
    LLVM_DEBUG(dbgs() << "checking ORim\n");
    LLVM_DEBUG(DefMI.dump());
    // FIXME: We may need to support FPImm too.
    assert(DefMI.getOperand(1).isImm());
    assert(DefMI.getOperand(2).isImm());
    ImmVal =
        DefMI.getOperand(1).getImm() + mimm2Val(DefMI.getOperand(2).getImm());
    LLVM_DEBUG(dbgs() << "ImmVal is " << ImmVal << "\n");
    break;
  case VE::LEAzii:
    // General move of an immediate on VE.
    LLVM_DEBUG(dbgs() << "checking LEAzii\n");
    LLVM_DEBUG(DefMI.dump());
    // FIXME: We may need to support FPImm too.
    assert(DefMI.getOperand(2).isImm());
    if (!DefMI.getOperand(3).isImm())
      // LEAzii may refer to a label.
      return false;
    ImmVal = DefMI.getOperand(2).getImm() + DefMI.getOperand(3).getImm();
    LLVM_DEBUG(dbgs() << "ImmVal is " << ImmVal << "\n");
    break;
  }

  // Try to fold like below:
  //   %1:i64 = ORim 0, 0(1)
  //   %2:i64 = CMPSLrr %0, %1
  // To
  //   %2:i64 = CMPSLrm %0, 0(1)
  //
  // Another example:
  //   %1:i64 = ORim 6, 0(1)
  //   %2:i64 = CMPSLrr %1, %0
  // To
  //   %2:i64 = CMPSLir 6, %0
  //
  // Support commutable instructions like below:
  //   %1:i64 = ORim 6, 0(1)
  //   %2:i64 = ADDSLrr %1, %0
  // To
  //   %2:i64 = ADDSLri %0, 6
  //
  // FIXME: Need to support i32.  The current implementation requires
  //        EXTRACT_SUBREG, so the input has a COPY like the one below, which
  //        prevents folding:
  //   %1:i64 = ORim 6, 0(1)
  //   %2:i32 = COPY %1.sub_i32
  //   %3:i32 = ADDSWSXrr %0, %2
  // FIXME: Need to support shift, cmov, and more instructions.
  // FIXME: Need to support lvl too, but LVLGen runs after peephole-opt.

  LLVM_DEBUG(dbgs() << "checking UseMI\n");
  LLVM_DEBUG(UseMI.dump());
  unsigned NewUseOpcSImm7;
  unsigned NewUseOpcMImm;
  enum InstType {
    rr2ri_rm, // rr -> ri or rm, commutable
    rr2ir_rm, // rr -> ir or rm
  } InstType;

  using namespace llvm::VE;
#define INSTRKIND(NAME)                                                        \
  case NAME##rr:                                                               \
    NewUseOpcSImm7 = NAME##ri;                                                 \
    NewUseOpcMImm = NAME##rm;                                                  \
    InstType = rr2ri_rm;                                                       \
    break
#define NCINSTRKIND(NAME)                                                      \
  case NAME##rr:                                                               \
    NewUseOpcSImm7 = NAME##ir;                                                 \
    NewUseOpcMImm = NAME##rm;                                                  \
    InstType = rr2ir_rm;                                                       \
    break

  switch (UseMI.getOpcode()) {
  default:
    return false;

    INSTRKIND(ADDUL);
    INSTRKIND(ADDSWSX);
    INSTRKIND(ADDSWZX);
    INSTRKIND(ADDSL);
    NCINSTRKIND(SUBUL);
    NCINSTRKIND(SUBSWSX);
    NCINSTRKIND(SUBSWZX);
    NCINSTRKIND(SUBSL);
    INSTRKIND(MULUL);
    INSTRKIND(MULSWSX);
    INSTRKIND(MULSWZX);
    INSTRKIND(MULSL);
    NCINSTRKIND(DIVUL);
    NCINSTRKIND(DIVSWSX);
    NCINSTRKIND(DIVSWZX);
    NCINSTRKIND(DIVSL);
    NCINSTRKIND(CMPUL);
    NCINSTRKIND(CMPSWSX);
    NCINSTRKIND(CMPSWZX);
    NCINSTRKIND(CMPSL);
    INSTRKIND(MAXSWSX);
    INSTRKIND(MAXSWZX);
    INSTRKIND(MAXSL);
    INSTRKIND(MINSWSX);
    INSTRKIND(MINSWZX);
    INSTRKIND(MINSL);
    INSTRKIND(AND);
    INSTRKIND(OR);
    INSTRKIND(XOR);
    INSTRKIND(EQV);
    NCINSTRKIND(NND);
    NCINSTRKIND(MRG);
  }

#undef INSTRKIND
#undef NCINSTRKIND

  unsigned NewUseOpc;
  unsigned UseIdx;
  bool Commute = false;
  LLVM_DEBUG(dbgs() << "checking UseMI operands\n");
  switch (InstType) {
  case rr2ri_rm:
    UseIdx = 2;
    if (UseMI.getOperand(1).getReg() == Reg) {
      Commute = true;
    } else {
      assert(UseMI.getOperand(2).getReg() == Reg);
    }
    if (isInt<7>(ImmVal)) {
      // This ImmVal fits in the simm7 slot, so change UseOpc to an
      // instruction that holds a simm7 operand.
      NewUseOpc = NewUseOpcSImm7;
    } else if (isMImmVal(ImmVal)) {
      // Similarly, change UseOpc to an instruction that holds an mimm
      // operand.
      NewUseOpc = NewUseOpcMImm;
      ImmVal = val2MImm(ImmVal);
    } else
      return false;
    break;
  case rr2ir_rm:
    if (UseMI.getOperand(1).getReg() == Reg) {
      // Check whether the immediate value matches the UseMI instruction.
      if (!isInt<7>(ImmVal))
        return false;
      NewUseOpc = NewUseOpcSImm7;
      UseIdx = 1;
    } else {
      assert(UseMI.getOperand(2).getReg() == Reg);
      // Check whether the immediate value matches the UseMI instruction.
      if (!isMImmVal(ImmVal))
        return false;
      NewUseOpc = NewUseOpcMImm;
      ImmVal = val2MImm(ImmVal);
      UseIdx = 2;
    }
    break;
  }

  LLVM_DEBUG(dbgs() << "modifying UseMI\n");
  bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
  UseMI.setDesc(get(NewUseOpc));
  if (Commute) {
    UseMI.getOperand(1).setReg(UseMI.getOperand(UseIdx).getReg());
  }
  UseMI.getOperand(UseIdx).ChangeToImmediate(ImmVal);
  if (DeleteDef)
    DefMI.eraseFromParent();

  return true;
}

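// Return the register holding the GOT base.  VE uses %s15 (%got); a GETGOT
// pseudo is inserted at the start of the function the first time the base
// register is requested.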
Register VEInstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
  VEMachineFunctionInfo *VEFI = MF->getInfo<VEMachineFunctionInfo>();
  Register GlobalBaseReg = VEFI->getGlobalBaseReg();
  if (GlobalBaseReg != 0)
    return GlobalBaseReg;

  // We use %s15 (%got) as a global base register.
  GlobalBaseReg = VE::SX15;

  // Insert a pseudo instruction to set the GlobalBaseReg into the first
  // MBB of the function.
  MachineBasicBlock &FirstMBB = MF->front();
  MachineBasicBlock::iterator MBBI = FirstMBB.begin();
  DebugLoc dl;
  BuildMI(FirstMBB, MBBI, dl, get(VE::GETGOT), GlobalBaseReg);
  VEFI->setGlobalBaseReg(GlobalBaseReg);
  return GlobalBaseReg;
}

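// Each VM512 register VMPn overlays two consecutive VM mask registers.  The
// helpers below map a VMP register to its even-numbered ("upper") and
// odd-numbered ("lower") VM halves.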
static Register getVM512Upper(Register reg) {
  return (reg - VE::VMP0) * 2 + VE::VM0;
}

static Register getVM512Lower(Register reg) { return getVM512Upper(reg) + 1; }

// Expand pseudo logical vector instructions for VM512 registers.
static void expandPseudoLogM(MachineInstr &MI, const MCInstrDesc &MCID) {
  MachineBasicBlock *MBB = MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  Register VMXu = getVM512Upper(MI.getOperand(0).getReg());
  Register VMXl = getVM512Lower(MI.getOperand(0).getReg());
  Register VMYu = getVM512Upper(MI.getOperand(1).getReg());
  Register VMYl = getVM512Lower(MI.getOperand(1).getReg());

  switch (MI.getOpcode()) {
  default: {
    Register VMZu = getVM512Upper(MI.getOperand(2).getReg());
    Register VMZl = getVM512Lower(MI.getOperand(2).getReg());
    BuildMI(*MBB, MI, DL, MCID).addDef(VMXu).addUse(VMYu).addUse(VMZu);
    BuildMI(*MBB, MI, DL, MCID).addDef(VMXl).addUse(VMYl).addUse(VMZl);
    break;
  }
  case VE::NEGMy:
    BuildMI(*MBB, MI, DL, MCID).addDef(VMXu).addUse(VMYu);
    BuildMI(*MBB, MI, DL, MCID).addDef(VMXl).addUse(VMYl);
    break;
  }
  MI.eraseFromParent();
}

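// Copy the operands of a pseudo VFMK instruction onto the real upper- or
// lower-half instruction being built, substituting the matching VM half for
// every VM512 operand.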
static void addOperandsForVFMK(MachineInstrBuilder &MIB, MachineInstr &MI,
                               bool Upper) {
  // VM512
  MIB.addReg(Upper ? getVM512Upper(MI.getOperand(0).getReg())
                   : getVM512Lower(MI.getOperand(0).getReg()));

  switch (MI.getNumExplicitOperands()) {
  default:
    report_fatal_error("unexpected number of operands for pvfmk");
  case 2: // _Ml: VM512, VL
    // VL
    MIB.addReg(MI.getOperand(1).getReg());
    break;
  case 4: // _Mvl: VM512, CC, VR, VL
    // CC
    MIB.addImm(MI.getOperand(1).getImm());
    // VR
    MIB.addReg(MI.getOperand(2).getReg());
    // VL
    MIB.addReg(MI.getOperand(3).getReg());
    break;
  case 5: // _MvMl: VM512, CC, VR, VM512, VL
    // CC
    MIB.addImm(MI.getOperand(1).getImm());
    // VR
    MIB.addReg(MI.getOperand(2).getReg());
    // VM512
    MIB.addReg(Upper ? getVM512Upper(MI.getOperand(3).getReg())
                     : getVM512Lower(MI.getOperand(3).getReg()));
    // VL
    MIB.addReg(MI.getOperand(4).getReg());
    break;
  }
}

static void expandPseudoVFMK(const TargetInstrInfo &TI, MachineInstr &MI) {
  // Replace a pseudo VFMK on a VM512 pair with the corresponding pair of
  // instructions, e.g. pvfmk.w.up and pvfmk.w.lo, or pvfmk.s.up and
  // pvfmk.s.lo.

  static const std::pair<unsigned, std::pair<unsigned, unsigned>> VFMKMap[] = {
      {VE::VFMKyal, {VE::VFMKLal, VE::VFMKLal}},
      {VE::VFMKynal, {VE::VFMKLnal, VE::VFMKLnal}},
      {VE::VFMKWyvl, {VE::PVFMKWUPvl, VE::PVFMKWLOvl}},
      {VE::VFMKWyvyl, {VE::PVFMKWUPvml, VE::PVFMKWLOvml}},
      {VE::VFMKSyvl, {VE::PVFMKSUPvl, VE::PVFMKSLOvl}},
      {VE::VFMKSyvyl, {VE::PVFMKSUPvml, VE::PVFMKSLOvml}},
  };

  unsigned Opcode = MI.getOpcode();

  const auto *Found =
      llvm::find_if(VFMKMap, [&](auto P) { return P.first == Opcode; });
  if (Found == std::end(VFMKMap))
    report_fatal_error("unexpected opcode for pseudo vfmk");

  unsigned OpcodeUpper = (*Found).second.first;
  unsigned OpcodeLower = (*Found).second.second;

  MachineBasicBlock *MBB = MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  MachineInstrBuilder Bu = BuildMI(*MBB, MI, DL, TI.get(OpcodeUpper));
  addOperandsForVFMK(Bu, MI, /* Upper */ true);
  MachineInstrBuilder Bl = BuildMI(*MBB, MI, DL, TI.get(OpcodeLower));
  addOperandsForVFMK(Bl, MI, /* Upper */ false);

  MI.eraseFromParent();
}

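// Expand post-RA pseudo instructions: stack pseudos are handled by dedicated
// helpers below, and VM512 mask pseudos are split into operations on their
// two VM register halves.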
bool VEInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  case VE::EXTEND_STACK: {
    return expandExtendStackPseudo(MI);
  }
  case VE::EXTEND_STACK_GUARD: {
    MI.eraseFromParent(); // The pseudo instruction is gone now.
    return true;
  }
  case VE::GETSTACKTOP: {
    return expandGetStackTopPseudo(MI);
  }

  case VE::ANDMyy:
    expandPseudoLogM(MI, get(VE::ANDMmm));
    return true;
  case VE::ORMyy:
    expandPseudoLogM(MI, get(VE::ORMmm));
    return true;
  case VE::XORMyy:
    expandPseudoLogM(MI, get(VE::XORMmm));
    return true;
  case VE::EQVMyy:
    expandPseudoLogM(MI, get(VE::EQVMmm));
    return true;
  case VE::NNDMyy:
    expandPseudoLogM(MI, get(VE::NNDMmm));
    return true;
  case VE::NEGMy:
    expandPseudoLogM(MI, get(VE::NEGMm));
    return true;

  case VE::LVMyir:
  case VE::LVMyim:
  case VE::LVMyir_y:
  case VE::LVMyim_y: {
    Register VMXu = getVM512Upper(MI.getOperand(0).getReg());
    Register VMXl = getVM512Lower(MI.getOperand(0).getReg());
    int64_t Imm = MI.getOperand(1).getImm();
    bool IsSrcReg =
        MI.getOpcode() == VE::LVMyir || MI.getOpcode() == VE::LVMyir_y;
    Register Src = IsSrcReg ? MI.getOperand(2).getReg() : VE::NoRegister;
    int64_t MImm = IsSrcReg ? 0 : MI.getOperand(2).getImm();
    bool KillSrc = IsSrcReg ? MI.getOperand(2).isKill() : false;
    Register VMX = VMXl;
    if (Imm >= 4) {
      // Element indices 0-3 address the lower half of the VM512 pair; 4-7
      // address the upper half.
      VMX = VMXu;
      Imm -= 4;
    }
    MachineBasicBlock *MBB = MI.getParent();
    DebugLoc DL = MI.getDebugLoc();
    switch (MI.getOpcode()) {
    case VE::LVMyir:
      BuildMI(*MBB, MI, DL, get(VE::LVMir))
          .addDef(VMX)
          .addImm(Imm)
          .addReg(Src, getKillRegState(KillSrc));
      break;
    case VE::LVMyim:
      BuildMI(*MBB, MI, DL, get(VE::LVMim))
          .addDef(VMX)
          .addImm(Imm)
          .addImm(MImm);
      break;
    case VE::LVMyir_y:
      assert(MI.getOperand(0).getReg() == MI.getOperand(3).getReg() &&
             "LVMyir_y has different register in 3rd operand");
      BuildMI(*MBB, MI, DL, get(VE::LVMir_m))
          .addDef(VMX)
          .addImm(Imm)
          .addReg(Src, getKillRegState(KillSrc))
          .addReg(VMX);
      break;
    case VE::LVMyim_y:
      assert(MI.getOperand(0).getReg() == MI.getOperand(3).getReg() &&
             "LVMyim_y has different register in 3rd operand");
      BuildMI(*MBB, MI, DL, get(VE::LVMim_m))
          .addDef(VMX)
          .addImm(Imm)
          .addImm(MImm)
          .addReg(VMX);
      break;
    }
    MI.eraseFromParent();
    return true;
  }
  case VE::SVMyi: {
    Register Dest = MI.getOperand(0).getReg();
    Register VMZu = getVM512Upper(MI.getOperand(1).getReg());
    Register VMZl = getVM512Lower(MI.getOperand(1).getReg());
    bool KillSrc = MI.getOperand(1).isKill();
    int64_t Imm = MI.getOperand(2).getImm();
    Register VMZ = VMZl;
    if (Imm >= 4) {
      // As above, indices 4-7 select the upper VM half.
      VMZ = VMZu;
      Imm -= 4;
    }
    MachineBasicBlock *MBB = MI.getParent();
    DebugLoc DL = MI.getDebugLoc();
    MachineInstrBuilder MIB =
        BuildMI(*MBB, MI, DL, get(VE::SVMmi), Dest).addReg(VMZ).addImm(Imm);
    MachineInstr *Inst = MIB.getInstr();
    if (KillSrc) {
      const TargetRegisterInfo *TRI = &getRegisterInfo();
      Inst->addRegisterKilled(MI.getOperand(1).getReg(), TRI, true);
    }
    MI.eraseFromParent();
    return true;
  }
  case VE::VFMKyal:
  case VE::VFMKynal:
  case VE::VFMKWyvl:
  case VE::VFMKWyvyl:
  case VE::VFMKSyvl:
  case VE::VFMKSyvyl:
    expandPseudoVFMK(*this, MI);
    return true;
  }
  return false;
}

bool VEInstrInfo::expandExtendStackPseudo(MachineInstr &MI) const {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const VESubtarget &STI = MF.getSubtarget<VESubtarget>();
  const VEInstrInfo &TII = *STI.getInstrInfo();
  DebugLoc dl = MBB.findDebugLoc(MI);

  // Create the following instructions and multiple basic blocks.
  //
  //  thisBB:
  //    brge.l.t %sp, %sl, sinkBB
  //  syscallBB:
  //    ld      %s61, 0x18(, %tp)        // load param area
  //    or      %s62, 0, %s0             // spill the value of %s0
  //    lea     %s63, 0x13b              // syscall # of grow
  //    shm.l   %s63, 0x0(%s61)          // store syscall # at addr:0
  //    shm.l   %sl, 0x8(%s61)           // store old limit at addr:8
  //    shm.l   %sp, 0x10(%s61)          // store new limit at addr:16
  //    monc                             // call monitor
  //    or      %s0, 0, %s62             // restore the value of %s0
  //  sinkBB:

  // Create the new MBBs.
  MachineBasicBlock *BB = &MBB;
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineBasicBlock *syscallMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MachineFunction::iterator It = ++(BB->getIterator());
  MF.insert(It, syscallMBB);
  MF.insert(It, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), BB,
                  std::next(std::next(MachineBasicBlock::iterator(MI))),
                  BB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(syscallMBB);
  BB->addSuccessor(sinkMBB);
  BuildMI(BB, dl, TII.get(VE::BRCFLrr_t))
      .addImm(VECC::CC_IGE)
      .addReg(VE::SX11) // %sp
      .addReg(VE::SX8)  // %sl
      .addMBB(sinkMBB);

  BB = syscallMBB;

  // Update machine-CFG edges.
  BB->addSuccessor(sinkMBB);

  BuildMI(BB, dl, TII.get(VE::LDrii), VE::SX61)
      .addReg(VE::SX14)
      .addImm(0)
      .addImm(0x18);
  BuildMI(BB, dl, TII.get(VE::ORri), VE::SX62)
      .addReg(VE::SX0)
      .addImm(0);
  BuildMI(BB, dl, TII.get(VE::LEAzii), VE::SX63)
      .addImm(0)
      .addImm(0)
      .addImm(0x13b);
  BuildMI(BB, dl, TII.get(VE::SHMLri))
      .addReg(VE::SX61)
      .addImm(0)
      .addReg(VE::SX63);
  BuildMI(BB, dl, TII.get(VE::SHMLri))
      .addReg(VE::SX61)
      .addImm(8)
      .addReg(VE::SX8);
  BuildMI(BB, dl, TII.get(VE::SHMLri))
      .addReg(VE::SX61)
      .addImm(16)
      .addReg(VE::SX11);
  BuildMI(BB, dl, TII.get(VE::MONC));

  BuildMI(BB, dl, TII.get(VE::ORri), VE::SX0)
      .addReg(VE::SX62)
      .addImm(0);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return true;
}

bool VEInstrInfo::expandGetStackTopPseudo(MachineInstr &MI) const {
  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction &MF = *MBB->getParent();
  const VESubtarget &STI = MF.getSubtarget<VESubtarget>();
  const VEInstrInfo &TII = *STI.getInstrInfo();
  DebugLoc DL = MBB->findDebugLoc(MI);

  // Create the following instruction:
  //
  //   dst = %sp + target-specific frame size + the size of the parameter area

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const VEFrameLowering &TFL = *STI.getFrameLowering();

  // The VE ABI requires a reserved area at the top of the stack as described
  // in VEFrameLowering.cpp, so adjust for it here.
  unsigned NumBytes = STI.getAdjustedFrameSize(0);

  // Also add the size of the parameter area.
  if (MFI.adjustsStack() && TFL.hasReservedCallFrame(MF))
    NumBytes += MFI.getMaxCallFrameSize();

  BuildMI(*MBB, MI, DL, TII.get(VE::LEArii))
      .addDef(MI.getOperand(0).getReg())
      .addReg(VE::SX11)
      .addImm(0)
      .addImm(NumBytes);

  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return true;
}