//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Emit Stmt nodes as CIR code.
//
//===----------------------------------------------------------------------===//

#include "CIRGenBuilder.h"
#include "CIRGenFunction.h"

#include "mlir/IR/Builders.h"
#include "mlir/IR/Location.h"
#include "mlir/Support/LLVM.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/Stmt.h"

using namespace clang;
using namespace clang::CIRGen;
using namespace cir;

static mlir::LogicalResult emitStmtWithResult(CIRGenFunction &cgf,
                                              const Stmt *exprResult,
                                              AggValueSlot slot,
                                              Address *lastValue) {
  // We have to special case labels here. They are statements, but when put
  // at the end of a statement expression, they yield the value of their
  // subexpression. Handle this by walking through all labels we encounter,
  // emitting them before we evaluate the subexpr.
  // Similar issues arise for attributed statements.
  while (!isa<Expr>(exprResult)) {
    if (const auto *ls = dyn_cast<LabelStmt>(exprResult)) {
      if (cgf.emitLabel(*ls->getDecl()).failed())
        return mlir::failure();
      exprResult = ls->getSubStmt();
    } else if (const auto *as = dyn_cast<AttributedStmt>(exprResult)) {
      // FIXME: Update this if we ever have attributes that affect the
      // semantics of an expression.
      exprResult = as->getSubStmt();
    } else {
      llvm_unreachable("Unknown value statement");
    }
  }

  const Expr *e = cast<Expr>(exprResult);
  QualType exprTy = e->getType();
  if (cgf.hasAggregateEvaluationKind(exprTy)) {
    cgf.emitAggExpr(e, slot);
  } else {
    // We can't return an RValue here because there might be cleanups at
    // the end of the StmtExpr. Because of that, we have to emit the result
    // here into a temporary alloca.
    cgf.emitAnyExprToMem(e, *lastValue, Qualifiers(),
                         /*IsInit*/ false);
  }

  return mlir::success();
}
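
// Illustrative sketch (not part of the build): the label-at-the-end case above
// comes from GNU statement expressions such as
//
//   int x = ({ int v = compute(); out: v; });
//
// where compute() is a placeholder for any initializer. The value of the
// trailing expression statement reached through the label `out` is the value
// of the whole ({...}), so the label has to be emitted before the
// subexpression is evaluated.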

mlir::LogicalResult CIRGenFunction::emitCompoundStmtWithoutScope(
    const CompoundStmt &s, Address *lastValue, AggValueSlot slot) {
  mlir::LogicalResult result = mlir::success();
  const Stmt *exprResult = s.body_back();
  assert((!lastValue || (lastValue && exprResult)) &&
         "If lastValue is not null then the CompoundStmt must have a "
         "StmtExprResult");

  for (const Stmt *curStmt : s.body()) {
    const bool saveResult = lastValue && exprResult == curStmt;
    if (saveResult) {
      if (emitStmtWithResult(*this, exprResult, slot, lastValue).failed())
        result = mlir::failure();
    } else {
      if (emitStmt(curStmt, /*useCurrentScope=*/false).failed())
        result = mlir::failure();
    }
  }
  return result;
}

mlir::LogicalResult CIRGenFunction::emitCompoundStmt(const CompoundStmt &s,
                                                     Address *lastValue,
                                                     AggValueSlot slot) {
  // Add a local scope to track newly declared variables.
  SymTableScopeTy varScope(symbolTable);
  mlir::Location scopeLoc = getLoc(s.getSourceRange());
  mlir::OpBuilder::InsertPoint scopeInsPt;
  cir::ScopeOp::create(
      builder, scopeLoc,
      [&](mlir::OpBuilder &b, mlir::Type &type, mlir::Location loc) {
        scopeInsPt = b.saveInsertionPoint();
      });
  mlir::OpBuilder::InsertionGuard guard(builder);
  builder.restoreInsertionPoint(scopeInsPt);
  LexicalScope lexScope(*this, scopeLoc, builder.getInsertionBlock());
  return emitCompoundStmtWithoutScope(s, lastValue, slot);
}
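
// Rough shape of the output (a sketch, not verbatim CIR): a compound statement
// such as
//
//   { int x = f(); use(x); }
//
// is wrapped in a cir.scope region; the LexicalScope above tracks the
// declarations and cleanups belonging to that region, and the body is then
// emitted by emitCompoundStmtWithoutScope. (f() and use() are placeholders.)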

// Build CIR for a statement. useCurrentScope should be true if no new scopes
// need to be created when finding a compound statement.
mlir::LogicalResult CIRGenFunction::emitStmt(const Stmt *s,
                                             bool useCurrentScope,
                                             ArrayRef<const Attr *> attrs) {
  if (mlir::succeeded(emitSimpleStmt(s, useCurrentScope)))
    return mlir::success();

  switch (s->getStmtClass()) {
  case Stmt::NoStmtClass:
  case Stmt::CXXCatchStmtClass:
  case Stmt::SEHExceptStmtClass:
  case Stmt::SEHFinallyStmtClass:
  case Stmt::MSDependentExistsStmtClass:
    llvm_unreachable("invalid statement class to emit generically");
  case Stmt::BreakStmtClass:
  case Stmt::NullStmtClass:
  case Stmt::CompoundStmtClass:
  case Stmt::ContinueStmtClass:
  case Stmt::DeclStmtClass:
  case Stmt::ReturnStmtClass:
    llvm_unreachable("should have emitted these statements as simple");

#define STMT(Type, Base)
#define ABSTRACT_STMT(Op)
#define EXPR(Type, Base) case Stmt::Type##Class:
#include "clang/AST/StmtNodes.inc"
  {
    assert(builder.getInsertionBlock() &&
           "expression emission must have an insertion point");

    emitIgnoredExpr(cast<Expr>(s));

    // Classic codegen has a check here to see if the emitter created a new
    // block that isn't used (comparing the incoming and outgoing insertion
    // points) and deletes the outgoing block if it's not used. In CIR, we
    // will handle that during the cir.canonicalize pass.
    return mlir::success();
  }
  case Stmt::IfStmtClass:
    return emitIfStmt(cast<IfStmt>(*s));
  case Stmt::SwitchStmtClass:
    return emitSwitchStmt(cast<SwitchStmt>(*s));
  case Stmt::ForStmtClass:
    return emitForStmt(cast<ForStmt>(*s));
  case Stmt::WhileStmtClass:
    return emitWhileStmt(cast<WhileStmt>(*s));
  case Stmt::DoStmtClass:
    return emitDoStmt(cast<DoStmt>(*s));
  case Stmt::CXXTryStmtClass:
    return emitCXXTryStmt(cast<CXXTryStmt>(*s));
  case Stmt::CXXForRangeStmtClass:
    return emitCXXForRangeStmt(cast<CXXForRangeStmt>(*s), attrs);
  case Stmt::CoroutineBodyStmtClass:
    return emitCoroutineBody(cast<CoroutineBodyStmt>(*s));
  case Stmt::IndirectGotoStmtClass:
    return emitIndirectGotoStmt(cast<IndirectGotoStmt>(*s));
  case Stmt::OpenACCComputeConstructClass:
    return emitOpenACCComputeConstruct(cast<OpenACCComputeConstruct>(*s));
  case Stmt::OpenACCLoopConstructClass:
    return emitOpenACCLoopConstruct(cast<OpenACCLoopConstruct>(*s));
  case Stmt::OpenACCCombinedConstructClass:
    return emitOpenACCCombinedConstruct(cast<OpenACCCombinedConstruct>(*s));
  case Stmt::OpenACCDataConstructClass:
    return emitOpenACCDataConstruct(cast<OpenACCDataConstruct>(*s));
  case Stmt::OpenACCEnterDataConstructClass:
    return emitOpenACCEnterDataConstruct(cast<OpenACCEnterDataConstruct>(*s));
  case Stmt::OpenACCExitDataConstructClass:
    return emitOpenACCExitDataConstruct(cast<OpenACCExitDataConstruct>(*s));
  case Stmt::OpenACCHostDataConstructClass:
    return emitOpenACCHostDataConstruct(cast<OpenACCHostDataConstruct>(*s));
  case Stmt::OpenACCWaitConstructClass:
    return emitOpenACCWaitConstruct(cast<OpenACCWaitConstruct>(*s));
  case Stmt::OpenACCInitConstructClass:
    return emitOpenACCInitConstruct(cast<OpenACCInitConstruct>(*s));
  case Stmt::OpenACCShutdownConstructClass:
    return emitOpenACCShutdownConstruct(cast<OpenACCShutdownConstruct>(*s));
  case Stmt::OpenACCSetConstructClass:
    return emitOpenACCSetConstruct(cast<OpenACCSetConstruct>(*s));
  case Stmt::OpenACCUpdateConstructClass:
    return emitOpenACCUpdateConstruct(cast<OpenACCUpdateConstruct>(*s));
  case Stmt::OpenACCCacheConstructClass:
    return emitOpenACCCacheConstruct(cast<OpenACCCacheConstruct>(*s));
  case Stmt::OpenACCAtomicConstructClass:
    return emitOpenACCAtomicConstruct(cast<OpenACCAtomicConstruct>(*s));
  case Stmt::GCCAsmStmtClass:
  case Stmt::MSAsmStmtClass:
    return emitAsmStmt(cast<AsmStmt>(*s));
  case Stmt::OMPScopeDirectiveClass:
  case Stmt::OMPErrorDirectiveClass:
  case Stmt::OMPParallelDirectiveClass:
  case Stmt::OMPTaskwaitDirectiveClass:
  case Stmt::OMPTaskyieldDirectiveClass:
  case Stmt::OMPBarrierDirectiveClass:
  case Stmt::OMPMetaDirectiveClass:
  case Stmt::OMPCanonicalLoopClass:
  case Stmt::OMPSimdDirectiveClass:
  case Stmt::OMPTileDirectiveClass:
  case Stmt::OMPUnrollDirectiveClass:
  case Stmt::OMPFuseDirectiveClass:
  case Stmt::OMPForDirectiveClass:
  case Stmt::OMPForSimdDirectiveClass:
  case Stmt::OMPSectionsDirectiveClass:
  case Stmt::OMPSectionDirectiveClass:
  case Stmt::OMPSingleDirectiveClass:
  case Stmt::OMPMasterDirectiveClass:
  case Stmt::OMPCriticalDirectiveClass:
  case Stmt::OMPParallelForDirectiveClass:
  case Stmt::OMPParallelForSimdDirectiveClass:
  case Stmt::OMPParallelMasterDirectiveClass:
  case Stmt::OMPParallelSectionsDirectiveClass:
  case Stmt::OMPTaskDirectiveClass:
  case Stmt::OMPTaskgroupDirectiveClass:
  case Stmt::OMPFlushDirectiveClass:
  case Stmt::OMPDepobjDirectiveClass:
  case Stmt::OMPScanDirectiveClass:
  case Stmt::OMPOrderedDirectiveClass:
  case Stmt::OMPAtomicDirectiveClass:
  case Stmt::OMPTargetDirectiveClass:
  case Stmt::OMPTeamsDirectiveClass:
  case Stmt::OMPCancellationPointDirectiveClass:
  case Stmt::OMPCancelDirectiveClass:
  case Stmt::OMPTargetDataDirectiveClass:
  case Stmt::OMPTargetEnterDataDirectiveClass:
  case Stmt::OMPTargetExitDataDirectiveClass:
  case Stmt::OMPTargetParallelDirectiveClass:
  case Stmt::OMPTargetParallelForDirectiveClass:
  case Stmt::OMPTaskLoopDirectiveClass:
  case Stmt::OMPTaskLoopSimdDirectiveClass:
  case Stmt::OMPMaskedTaskLoopDirectiveClass:
  case Stmt::OMPMaskedTaskLoopSimdDirectiveClass:
  case Stmt::OMPMasterTaskLoopDirectiveClass:
  case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
  case Stmt::OMPParallelGenericLoopDirectiveClass:
  case Stmt::OMPParallelMaskedDirectiveClass:
  case Stmt::OMPParallelMaskedTaskLoopDirectiveClass:
  case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass:
  case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
  case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
  case Stmt::OMPDistributeDirectiveClass:
  case Stmt::OMPDistributeParallelForDirectiveClass:
  case Stmt::OMPDistributeParallelForSimdDirectiveClass:
  case Stmt::OMPDistributeSimdDirectiveClass:
  case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
  case Stmt::OMPTargetParallelForSimdDirectiveClass:
  case Stmt::OMPTargetSimdDirectiveClass:
  case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
  case Stmt::OMPTargetUpdateDirectiveClass:
  case Stmt::OMPTeamsDistributeDirectiveClass:
  case Stmt::OMPTeamsDistributeSimdDirectiveClass:
  case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
  case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
  case Stmt::OMPTeamsGenericLoopDirectiveClass:
  case Stmt::OMPTargetTeamsDirectiveClass:
  case Stmt::OMPTargetTeamsDistributeDirectiveClass:
  case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
  case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
  case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
  case Stmt::OMPInteropDirectiveClass:
  case Stmt::OMPDispatchDirectiveClass:
  case Stmt::OMPGenericLoopDirectiveClass:
  case Stmt::OMPReverseDirectiveClass:
  case Stmt::OMPInterchangeDirectiveClass:
  case Stmt::OMPAssumeDirectiveClass:
  case Stmt::OMPMaskedDirectiveClass:
  case Stmt::OMPStripeDirectiveClass:
  case Stmt::LabelStmtClass:
  case Stmt::AttributedStmtClass:
  case Stmt::GotoStmtClass:
  case Stmt::DefaultStmtClass:
  case Stmt::CaseStmtClass:
  case Stmt::SEHLeaveStmtClass:
  case Stmt::SYCLKernelCallStmtClass:
  case Stmt::CoreturnStmtClass:
  case Stmt::CapturedStmtClass:
  case Stmt::ObjCAtTryStmtClass:
  case Stmt::ObjCAtThrowStmtClass:
  case Stmt::ObjCAtSynchronizedStmtClass:
  case Stmt::ObjCForCollectionStmtClass:
  case Stmt::ObjCAutoreleasePoolStmtClass:
  case Stmt::SEHTryStmtClass:
  case Stmt::ObjCAtCatchStmtClass:
  case Stmt::ObjCAtFinallyStmtClass:
  case Stmt::DeferStmtClass:
    cgm.errorNYI(s->getSourceRange(),
                 std::string("emitStmt: ") + s->getStmtClassName());
    return mlir::failure();
  }

  llvm_unreachable("Unexpected statement class");
}

mlir::LogicalResult CIRGenFunction::emitSimpleStmt(const Stmt *s,
                                                   bool useCurrentScope) {
  switch (s->getStmtClass()) {
  default:
    return mlir::failure();
  case Stmt::DeclStmtClass:
    return emitDeclStmt(cast<DeclStmt>(*s));
  case Stmt::CompoundStmtClass:
    if (useCurrentScope)
      return emitCompoundStmtWithoutScope(cast<CompoundStmt>(*s));
    return emitCompoundStmt(cast<CompoundStmt>(*s));
  case Stmt::GotoStmtClass:
    return emitGotoStmt(cast<GotoStmt>(*s));
  case Stmt::ContinueStmtClass:
    return emitContinueStmt(cast<ContinueStmt>(*s));

  // NullStmt doesn't need any handling, but we need to say we handled it.
  case Stmt::NullStmtClass:
    break;

  case Stmt::LabelStmtClass:
    return emitLabelStmt(cast<LabelStmt>(*s));
  case Stmt::CaseStmtClass:
  case Stmt::DefaultStmtClass:
    // If we reached here, we must not be handling a top-level switch case.
    return emitSwitchCase(cast<SwitchCase>(*s),
                          /*buildingTopLevelCase=*/false);
    break;

  case Stmt::BreakStmtClass:
    return emitBreakStmt(cast<BreakStmt>(*s));
  case Stmt::ReturnStmtClass:
    return emitReturnStmt(cast<ReturnStmt>(*s));
  }

  return mlir::success();
}

mlir::LogicalResult CIRGenFunction::emitLabelStmt(const clang::LabelStmt &s) {
  if (emitLabel(*s.getDecl()).failed())
    return mlir::failure();

  if (getContext().getLangOpts().EHAsynch && s.isSideEntry())
    getCIRGenModule().errorNYI(s.getSourceRange(), "IsEHa: not implemented.");

  return emitStmt(s.getSubStmt(), /*useCurrentScope*/ true);
}

// Add a terminating yield on a body region if no other terminators are used.
static void terminateBody(CIRGenBuilderTy &builder, mlir::Region &r,
                          mlir::Location loc) {
  if (r.empty())
    return;

  llvm::SmallVector<mlir::Block *> eraseBlocks;
  unsigned numBlocks = r.getBlocks().size();
  for (auto &block : r.getBlocks()) {
    // This also cleans up after return operations, which might create
    // empty blocks if emitted as the last stmt.
    if (numBlocks != 1 && block.empty() && block.hasNoPredecessors() &&
        block.hasNoSuccessors())
      eraseBlocks.push_back(&block);

    if (block.empty() ||
        !block.back().hasTrait<mlir::OpTrait::IsTerminator>()) {
      mlir::OpBuilder::InsertionGuard guardCase(builder);
      builder.setInsertionPointToEnd(&block);
      builder.createYield(loc);
    }
  }

  for (auto *b : eraseBlocks)
    b->erase();
}
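
// Rough illustration (a sketch, not verbatim CIR): given a loop body region
// whose trailing block ends without a terminator, e.g. after emitting
//
//   while (cond) { work(); }
//
// the helper above appends an implicit cir.yield to that block so the region
// is well formed, and erases empty blocks left behind by return emission.
// (work() is a placeholder.)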

mlir::LogicalResult CIRGenFunction::emitIfStmt(const IfStmt &s) {
  mlir::LogicalResult res = mlir::success();
  // The else branch of a consteval if statement is always the only branch
  // that can be runtime evaluated.
  const Stmt *constevalExecuted;
  if (s.isConsteval()) {
    constevalExecuted = s.isNegatedConsteval() ? s.getThen() : s.getElse();
    if (!constevalExecuted) {
      // No runtime code execution required
      return res;
    }
  }

  // C99 6.8.4.1: The first substatement is executed if the expression
  // compares unequal to 0. The condition must be a scalar type.
  auto ifStmtBuilder = [&]() -> mlir::LogicalResult {
    if (s.isConsteval())
      return emitStmt(constevalExecuted, /*useCurrentScope=*/true);

    if (s.getInit())
      if (emitStmt(s.getInit(), /*useCurrentScope=*/true).failed())
        return mlir::failure();

    if (s.getConditionVariable())
      emitDecl(*s.getConditionVariable());

    // If the condition folds to a constant and this is an 'if constexpr',
    // we simplify it early in CIRGen to avoid emitting the full 'if'.
    bool condConstant;
    if (constantFoldsToBool(s.getCond(), condConstant, s.isConstexpr())) {
      if (s.isConstexpr()) {
        // Handle "if constexpr" explicitly here to avoid generating
        // ill-formed code: in CIR the "if" is not simplified in this lambda
        // as it is in classic codegen; that simplification is deferred to
        // later MLIR passes.
        if (const Stmt *executed = condConstant ? s.getThen() : s.getElse())
          return emitStmt(executed, /*useCurrentScope=*/true);
        // There is nothing to execute at runtime.
        // TODO(cir): there is still an empty cir.scope generated by the caller.
        return mlir::success();
      }
    }

    return emitIfOnBoolExpr(s.getCond(), s.getThen(), s.getElse());
  };

  // TODO: Add a new scoped symbol table.
  // LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());
  // The if scope contains the full source range for IfStmt.
  mlir::Location scopeLoc = getLoc(s.getSourceRange());
  cir::ScopeOp::create(builder, scopeLoc, /*scopeBuilder=*/
                       [&](mlir::OpBuilder &b, mlir::Location loc) {
                         LexicalScope lexScope{*this, scopeLoc,
                                               builder.getInsertionBlock()};
                         res = ifStmtBuilder();
                       });

  return res;
}
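
// Illustrative sketch (not part of the build): for
//
//   if constexpr (sizeof(int) == 4) { a(); } else { b(); }
//
// the condition folds to a constant in constantFoldsToBool, so only the
// selected branch (here the call to a()) is emitted and no cir.if is created;
// if neither branch applies, nothing is emitted at runtime (the TODO above
// notes the leftover empty cir.scope). (a() and b() are placeholders.)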

mlir::LogicalResult CIRGenFunction::emitDeclStmt(const DeclStmt &s) {
  assert(builder.getInsertionBlock() && "expected valid insertion point");

  for (const Decl *i : s.decls())
    emitDecl(*i, /*evaluateConditionDecl=*/true);

  return mlir::success();
}

mlir::LogicalResult CIRGenFunction::emitReturnStmt(const ReturnStmt &s) {
  mlir::Location loc = getLoc(s.getSourceRange());
  const Expr *rv = s.getRetValue();

  RunCleanupsScope cleanupScope(*this);
  bool createNewScope = false;
  if (const auto *ewc = dyn_cast_or_null<ExprWithCleanups>(rv)) {
    rv = ewc->getSubExpr();
    createNewScope = true;
  }

  auto handleReturnVal = [&]() {
    if (getContext().getLangOpts().ElideConstructors && s.getNRVOCandidate() &&
        s.getNRVOCandidate()->isNRVOVariable()) {
      // Apply the named return value optimization for this return statement,
      // which means doing nothing: the appropriate result has already been
      // constructed into the NRVO variable.

      // If there is an NRVO flag for this variable, set it to 1 to indicate
      // that the cleanup code should not destroy the variable.
      if (auto nrvoFlag = nrvoFlags[s.getNRVOCandidate()])
        builder.createFlagStore(loc, true, nrvoFlag);
    } else if (!rv) {
      // No return expression. Do nothing.
    } else if (rv->getType()->isVoidType()) {
      // Make sure not to return anything, but evaluate the expression
      // for side effects.
      if (rv) {
        emitAnyExpr(rv);
      }
    } else if (cast<FunctionDecl>(curGD.getDecl())
                   ->getReturnType()
                   ->isReferenceType()) {
      // If this function returns a reference, take the address of the
      // expression rather than the value.
      RValue result = emitReferenceBindingToExpr(rv);
      builder.CIRBaseBuilderTy::createStore(loc, result.getValue(),
                                            *fnRetAlloca);
    } else {
      mlir::Value value = nullptr;
      switch (getEvaluationKind(rv->getType())) {
      case cir::TEK_Scalar:
        value = emitScalarExpr(rv);
        if (value) { // Change this to an assert once emitScalarExpr is complete
          builder.CIRBaseBuilderTy::createStore(loc, value, *fnRetAlloca);
        }
        break;
      case cir::TEK_Complex:
        emitComplexExprIntoLValue(rv,
                                  makeAddrLValue(returnValue, rv->getType()),
                                  /*isInit=*/true);
        break;
      case cir::TEK_Aggregate:
        assert(!cir::MissingFeatures::aggValueSlotGC());
        emitAggExpr(rv, AggValueSlot::forAddr(returnValue, Qualifiers(),
                                              AggValueSlot::IsDestructed,
                                              AggValueSlot::IsNotAliased,
                                              getOverlapForReturnValue()));
        break;
      }
    }
  };

  if (!createNewScope) {
    handleReturnVal();
  } else {
    mlir::Location scopeLoc =
        getLoc(rv ? rv->getSourceRange() : s.getSourceRange());
    // First create the cir.scope and only later emit its body. Otherwise, all
    // CIRGen dispatched by `handleReturnVal()` might need to manipulate blocks
    // and look into parents, which are all unlinked.
    mlir::OpBuilder::InsertPoint scopeBody;
    cir::ScopeOp::create(builder, scopeLoc, /*scopeBuilder=*/
                         [&](mlir::OpBuilder &b, mlir::Location loc) {
                           scopeBody = b.saveInsertionPoint();
                         });
    {
      mlir::OpBuilder::InsertionGuard guard(builder);
      builder.restoreInsertionPoint(scopeBody);
      CIRGenFunction::LexicalScope lexScope{*this, scopeLoc,
                                            builder.getInsertionBlock()};
      handleReturnVal();
    }
  }

  cleanupScope.forceCleanup();

  // In CIR we might have returns in different scopes.
  // FIXME(cir): cleanup code is handling the actual return emission; the logic
  // should try to match traditional codegen more closely (to the extent
  // possible).
  auto *retBlock = curLexScope->getOrCreateRetBlock(*this, loc);
  emitBranchThroughCleanup(loc, returnBlock(retBlock));

  // Insert the new block to continue codegen after branch to ret block.
  builder.createBlock(builder.getBlock()->getParent());

  return mlir::success();
}
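
// Illustrative sketch (not part of the build): for
//
//   Widget make() { Widget w; w.fill(); return w; }
//
// with NRVO applied, `w` is constructed directly in the return slot, so the
// return above emits no copy; it only sets the NRVO flag (when one exists) so
// cleanups skip destroying `w`. (Widget and fill() are placeholders.)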

mlir::LogicalResult CIRGenFunction::emitGotoStmt(const clang::GotoStmt &s) {
  // FIXME: LLVM codegen emits a stop point here for debug info
  // sake when the insertion point is available, but doesn't do
  // anything special when there isn't. We haven't implemented debug
  // info support just yet, look at this again once we have it.

  cir::GotoOp::create(builder, getLoc(s.getSourceRange()),
                      s.getLabel()->getName());

  // A goto marks the end of a block; create a new one so that codegen after
  // emitGotoStmt can resume building in that block.
  // Insert the new block to continue codegen after goto.
  builder.createBlock(builder.getBlock()->getParent());

  return mlir::success();
}

mlir::LogicalResult
CIRGenFunction::emitIndirectGotoStmt(const IndirectGotoStmt &s) {
  mlir::Value val = emitScalarExpr(s.getTarget());
  assert(indirectGotoBlock &&
         "the indirect goto block should already be emitted");
  cir::BrOp::create(builder, getLoc(s.getSourceRange()), indirectGotoBlock,
                    val);
  builder.createBlock(builder.getBlock()->getParent());
  return mlir::success();
}

mlir::LogicalResult
CIRGenFunction::emitContinueStmt(const clang::ContinueStmt &s) {
  builder.createContinue(getLoc(s.getKwLoc()));

  // Insert the new block to continue codegen after the continue statement.
  builder.createBlock(builder.getBlock()->getParent());

  return mlir::success();
}

mlir::LogicalResult CIRGenFunction::emitLabel(const clang::LabelDecl &d) {
  // Create a new block to tag with a label and add a branch from
  // the current one to it. If the block is empty, just attach it
  // to this label.
  mlir::Block *currBlock = builder.getBlock();
  mlir::Block *labelBlock = currBlock;

  if (!currBlock->empty() || currBlock->isEntryBlock()) {
    {
      mlir::OpBuilder::InsertionGuard guard(builder);
      labelBlock = builder.createBlock(builder.getBlock()->getParent());
    }
    cir::BrOp::create(builder, getLoc(d.getSourceRange()), labelBlock);
  }

  builder.setInsertionPointToEnd(labelBlock);
  cir::LabelOp label =
      cir::LabelOp::create(builder, getLoc(d.getSourceRange()), d.getName());
  builder.setInsertionPointToEnd(labelBlock);
  auto func = cast<cir::FuncOp>(curFn);
  cgm.mapBlockAddress(cir::BlockAddrInfoAttr::get(builder.getContext(),
                                                  func.getSymNameAttr(),
                                                  label.getLabelAttr()),
                      label);
  // FIXME: emit debug info for labels, incrementProfileCounter

  return mlir::success();
}
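
// Rough illustration (a sketch, not verbatim CIR): a label such as
//
//   done:
//     x = 1;
//
// gets its own block: the current block branches (cir.br) to a fresh block, a
// cir.label "done" is attached there, and the label/function pair is recorded
// so that &&done-style block addresses can be resolved later.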

mlir::LogicalResult CIRGenFunction::emitBreakStmt(const clang::BreakStmt &s) {
  builder.createBreak(getLoc(s.getKwLoc()));

  // Insert the new block to continue codegen after the break statement.
  builder.createBlock(builder.getBlock()->getParent());

  return mlir::success();
}

template <typename T>
mlir::LogicalResult
CIRGenFunction::emitCaseDefaultCascade(const T *stmt, mlir::Type condType,
                                       mlir::ArrayAttr value, CaseOpKind kind,
                                       bool buildingTopLevelCase) {

  assert((isa<CaseStmt, DefaultStmt>(stmt)) &&
         "only case or default stmt go here");

  mlir::LogicalResult result = mlir::success();

  mlir::Location loc = getLoc(stmt->getBeginLoc());

  enum class SubStmtKind { Case, Default, Other };
  SubStmtKind subStmtKind = SubStmtKind::Other;
  const Stmt *sub = stmt->getSubStmt();

  mlir::OpBuilder::InsertPoint insertPoint;
  CaseOp::create(builder, loc, value, kind, insertPoint);

  {
    mlir::OpBuilder::InsertionGuard guardSwitch(builder);
    builder.restoreInsertionPoint(insertPoint);

    if (isa<DefaultStmt>(sub) && isa<CaseStmt>(stmt)) {
      subStmtKind = SubStmtKind::Default;
      builder.createYield(loc);
    } else if (isa<CaseStmt>(sub) && isa<DefaultStmt, CaseStmt>(stmt)) {
      subStmtKind = SubStmtKind::Case;
      builder.createYield(loc);
    } else {
      result = emitStmt(sub, /*useCurrentScope=*/!isa<CompoundStmt>(sub));
    }

    insertPoint = builder.saveInsertionPoint();
  }

  // If the substmt is a default stmt or case stmt, try to handle this special
  // case and emit it in the simple form. e.g.
  //
  //  switch () {
  //    case 1:
  //    default:
  //      ...
  //  }
  //
  // we prefer generating
  //
  //  cir.switch() {
  //     cir.case(equal, 1) {
  //        cir.yield
  //     }
  //     cir.case(default) {
  //        ...
  //     }
  //  }
  //
  // over
  //
  //  cir.switch() {
  //     cir.case(equal, 1) {
  //       cir.case(default) {
  //         ...
  //       }
  //     }
  //  }
  //
  // We don't need to revert this if we find later that the current switch
  // can't be in the simple form, since the conversion itself is harmless.
  if (subStmtKind == SubStmtKind::Case) {
    result = emitCaseStmt(*cast<CaseStmt>(sub), condType, buildingTopLevelCase);
  } else if (subStmtKind == SubStmtKind::Default) {
    result = emitDefaultStmt(*cast<DefaultStmt>(sub), condType,
                             buildingTopLevelCase);
  } else if (buildingTopLevelCase) {
    // If we're building a top-level case, try to restore the insertion point
    // to the case we're building, so we can attach subsequent statements to it
    // and keep the generated `cir.switch` operation in the simple form.
    builder.restoreInsertionPoint(insertPoint);
  }

  return result;
}

mlir::LogicalResult CIRGenFunction::emitCaseStmt(const CaseStmt &s,
                                                 mlir::Type condType,
                                                 bool buildingTopLevelCase) {
  cir::CaseOpKind kind;
  mlir::ArrayAttr value;
  llvm::APSInt intVal = s.getLHS()->EvaluateKnownConstInt(getContext());

  // If the case statement has an RHS value, it is representing a GNU
  // case range statement, where LHS is the beginning of the range
  // and RHS is the end of the range.
  if (const Expr *rhs = s.getRHS()) {
    llvm::APSInt endVal = rhs->EvaluateKnownConstInt(getContext());
    value = builder.getArrayAttr({cir::IntAttr::get(condType, intVal),
                                  cir::IntAttr::get(condType, endVal)});
    kind = cir::CaseOpKind::Range;
  } else {
    value = builder.getArrayAttr({cir::IntAttr::get(condType, intVal)});
    kind = cir::CaseOpKind::Equal;
  }

  return emitCaseDefaultCascade(&s, condType, value, kind,
                                buildingTopLevelCase);
}
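
// Illustrative sketch (not part of the build): a GNU case range such as
//
//   switch (n) {
//   case 1 ... 5: handle(); break;
//   }
//
// takes the RHS path above and becomes a cir.case of kind Range whose value
// array holds the two bounds [1, 5]; a plain `case 1:` becomes kind Equal with
// a single value. (handle() is a placeholder.)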

mlir::LogicalResult CIRGenFunction::emitDefaultStmt(const DefaultStmt &s,
                                                    mlir::Type condType,
                                                    bool buildingTopLevelCase) {
  return emitCaseDefaultCascade(&s, condType, builder.getArrayAttr({}),
                                cir::CaseOpKind::Default, buildingTopLevelCase);
}

mlir::LogicalResult CIRGenFunction::emitSwitchCase(const SwitchCase &s,
                                                   bool buildingTopLevelCase) {
  assert(!condTypeStack.empty() &&
         "build switch case without specifying the type of the condition");

  if (s.getStmtClass() == Stmt::CaseStmtClass)
    return emitCaseStmt(cast<CaseStmt>(s), condTypeStack.back(),
                        buildingTopLevelCase);

  if (s.getStmtClass() == Stmt::DefaultStmtClass)
    return emitDefaultStmt(cast<DefaultStmt>(s), condTypeStack.back(),
                           buildingTopLevelCase);

  llvm_unreachable("expect case or default stmt");
}

mlir::LogicalResult
CIRGenFunction::emitCXXForRangeStmt(const CXXForRangeStmt &s,
                                    ArrayRef<const Attr *> forAttrs) {
  cir::ForOp forOp;

  // TODO(cir): pass in array of attributes.
  auto forStmtBuilder = [&]() -> mlir::LogicalResult {
    mlir::LogicalResult loopRes = mlir::success();
    // Evaluate the first pieces before the loop.
    if (s.getInit())
      if (emitStmt(s.getInit(), /*useCurrentScope=*/true).failed())
        return mlir::failure();
    if (emitStmt(s.getRangeStmt(), /*useCurrentScope=*/true).failed())
      return mlir::failure();
    if (emitStmt(s.getBeginStmt(), /*useCurrentScope=*/true).failed())
      return mlir::failure();
    if (emitStmt(s.getEndStmt(), /*useCurrentScope=*/true).failed())
      return mlir::failure();

    // From LLVM: if there are any cleanups between here and the loop-exit
    // scope, create a block to stage a loop exit along.
    // We probably already do the right thing because of ScopeOp, but make
    // sure we handle all cases.

    forOp = builder.createFor(
        getLoc(s.getSourceRange()),
        /*condBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          assert(!cir::MissingFeatures::createProfileWeightsForLoop());
          assert(!cir::MissingFeatures::emitCondLikelihoodViaExpectIntrinsic());
          mlir::Value condVal = evaluateExprAsBool(s.getCond());
          builder.createCondition(condVal);
        },
        /*bodyBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          // https://en.cppreference.com/w/cpp/language/for
          // In C++ the scope of the init-statement and the scope of
          // statement are one and the same.
          bool useCurrentScope = true;
          if (emitStmt(s.getLoopVarStmt(), useCurrentScope).failed())
            loopRes = mlir::failure();
          if (emitStmt(s.getBody(), useCurrentScope).failed())
            loopRes = mlir::failure();
          emitStopPoint(&s);
        },
        /*stepBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          if (s.getInc())
            if (emitStmt(s.getInc(), /*useCurrentScope=*/true).failed())
              loopRes = mlir::failure();
          builder.createYield(loc);
        });
    return loopRes;
  };

  mlir::LogicalResult res = mlir::success();
  mlir::Location scopeLoc = getLoc(s.getSourceRange());
  cir::ScopeOp::create(builder, scopeLoc, /*scopeBuilder=*/
                       [&](mlir::OpBuilder &b, mlir::Location loc) {
                         // Create a cleanup scope for the condition
                         // variable cleanups. Logical equivalent from
                         // LLVM codegen for LexicalScope
                         // ConditionScope(*this, S.getSourceRange())...
                         LexicalScope lexScope{*this, loc,
                                               builder.getInsertionBlock()};
                         res = forStmtBuilder();
                       });

  if (res.failed())
    return res;

  terminateBody(builder, forOp.getBody(), getLoc(s.getEndLoc()));
  return mlir::success();
}
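
// Illustrative sketch (not part of the build): a range-based for such as
//
//   for (auto x : vec) use(x);
//
// is already desugared by Sema into a range statement (__range = vec), begin
// and end statements, a condition (__begin != __end), an increment
// (++__begin), and a loop-variable statement (auto x = *__begin); the code
// above simply emits those pieces into the cond/body/step regions of a
// cir.for. (vec and use() are placeholders.)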

mlir::LogicalResult CIRGenFunction::emitForStmt(const ForStmt &s) {
  cir::ForOp forOp;

  // TODO: pass in an array of attributes.
  auto forStmtBuilder = [&]() -> mlir::LogicalResult {
    mlir::LogicalResult loopRes = mlir::success();
    // Evaluate the first part before the loop.
    if (s.getInit())
      if (emitStmt(s.getInit(), /*useCurrentScope=*/true).failed())
        return mlir::failure();

    // In the classic codegen, if there are any cleanups between here and the
    // loop-exit scope, a block is created to stage the loop exit. We probably
    // already do the right thing because of ScopeOp, but we need more testing
    // to be sure we handle all cases.

    forOp = builder.createFor(
        getLoc(s.getSourceRange()),
        /*condBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          assert(!cir::MissingFeatures::createProfileWeightsForLoop());
          assert(!cir::MissingFeatures::emitCondLikelihoodViaExpectIntrinsic());
          mlir::Value condVal;
          if (s.getCond()) {
            // If the for statement has a condition scope,
            // emit the local variable declaration.
            if (s.getConditionVariable())
              emitDecl(*s.getConditionVariable());
            // C99 6.8.5p2/p4: The first substatement is executed if the
            // expression compares unequal to 0. The condition must be a
            // scalar type.
            condVal = evaluateExprAsBool(s.getCond());
          } else {
            condVal = cir::ConstantOp::create(b, loc, builder.getTrueAttr());
          }
          builder.createCondition(condVal);
        },
        /*bodyBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          // The scope of the for loop body is nested within the scope of the
          // for loop's init-statement and condition.
          if (emitStmt(s.getBody(), /*useCurrentScope=*/false).failed())
            loopRes = mlir::failure();
          emitStopPoint(&s);
        },
        /*stepBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          if (s.getInc())
            if (emitStmt(s.getInc(), /*useCurrentScope=*/true).failed())
              loopRes = mlir::failure();
          builder.createYield(loc);
        });
    return loopRes;
  };

  auto res = mlir::success();
  auto scopeLoc = getLoc(s.getSourceRange());
  cir::ScopeOp::create(builder, scopeLoc, /*scopeBuilder=*/
                       [&](mlir::OpBuilder &b, mlir::Location loc) {
                         LexicalScope lexScope{*this, loc,
                                               builder.getInsertionBlock()};
                         res = forStmtBuilder();
                       });

  if (res.failed())
    return res;

  terminateBody(builder, forOp.getBody(), getLoc(s.getEndLoc()));
  return mlir::success();
}
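
// Rough shape of the output (a sketch, not verbatim CIR): a loop like
//
//   for (int i = 0; i < n; ++i) body();
//
// becomes, inside an enclosing cir.scope that holds `i`, a cir.for with three
// regions: a cond region ending in a cir.condition on the comparison, a body
// region, and a step region that performs ++i and ends in cir.yield. A for
// loop without a condition gets a constant-true condition instead. (body() is
// a placeholder.)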

mlir::LogicalResult CIRGenFunction::emitDoStmt(const DoStmt &s) {
  cir::DoWhileOp doWhileOp;

  // TODO: pass in array of attributes.
  auto doStmtBuilder = [&]() -> mlir::LogicalResult {
    mlir::LogicalResult loopRes = mlir::success();

    // From LLVM: if there are any cleanups between here and the loop-exit
    // scope, create a block to stage a loop exit along.
    // We probably already do the right thing because of ScopeOp, but make
    // sure we handle all cases.

    doWhileOp = builder.createDoWhile(
        getLoc(s.getSourceRange()),
        /*condBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          assert(!cir::MissingFeatures::createProfileWeightsForLoop());
          assert(!cir::MissingFeatures::emitCondLikelihoodViaExpectIntrinsic());
          // C99 6.8.5p2/p4: The first substatement is executed if the
          // expression compares unequal to 0. The condition must be a
          // scalar type.
          mlir::Value condVal = evaluateExprAsBool(s.getCond());
          builder.createCondition(condVal);
        },
        /*bodyBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          // The scope of the do-while loop body is a nested scope.
          if (emitStmt(s.getBody(), /*useCurrentScope=*/false).failed())
            loopRes = mlir::failure();
          emitStopPoint(&s);
        });
    return loopRes;
  };

  mlir::LogicalResult res = mlir::success();
  mlir::Location scopeLoc = getLoc(s.getSourceRange());
  cir::ScopeOp::create(builder, scopeLoc, /*scopeBuilder=*/
                       [&](mlir::OpBuilder &b, mlir::Location loc) {
                         LexicalScope lexScope{*this, loc,
                                               builder.getInsertionBlock()};
                         res = doStmtBuilder();
                       });

  if (res.failed())
    return res;

  terminateBody(builder, doWhileOp.getBody(), getLoc(s.getEndLoc()));
  return mlir::success();
}

mlir::LogicalResult CIRGenFunction::emitWhileStmt(const WhileStmt &s) {
  cir::WhileOp whileOp;

  // TODO: pass in array of attributes.
  auto whileStmtBuilder = [&]() -> mlir::LogicalResult {
    mlir::LogicalResult loopRes = mlir::success();

    // From LLVM: if there are any cleanups between here and the loop-exit
    // scope, create a block to stage a loop exit along.
    // We probably already do the right thing because of ScopeOp, but make
    // sure we handle all cases.

    whileOp = builder.createWhile(
        getLoc(s.getSourceRange()),
        /*condBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          assert(!cir::MissingFeatures::createProfileWeightsForLoop());
          assert(!cir::MissingFeatures::emitCondLikelihoodViaExpectIntrinsic());
          mlir::Value condVal;
          // If the while statement has a condition scope,
          // emit the local variable declaration.
          if (s.getConditionVariable())
            emitDecl(*s.getConditionVariable());
          // C99 6.8.5p2/p4: The first substatement is executed if the
          // expression compares unequal to 0. The condition must be a
          // scalar type.
          condVal = evaluateExprAsBool(s.getCond());
          builder.createCondition(condVal);
        },
        /*bodyBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          // The scope of the while loop body is a nested scope.
          if (emitStmt(s.getBody(), /*useCurrentScope=*/false).failed())
            loopRes = mlir::failure();
          emitStopPoint(&s);
        });
    return loopRes;
  };

  mlir::LogicalResult res = mlir::success();
  mlir::Location scopeLoc = getLoc(s.getSourceRange());
  cir::ScopeOp::create(builder, scopeLoc, /*scopeBuilder=*/
                       [&](mlir::OpBuilder &b, mlir::Location loc) {
                         LexicalScope lexScope{*this, loc,
                                               builder.getInsertionBlock()};
                         res = whileStmtBuilder();
                       });

  if (res.failed())
    return res;

  terminateBody(builder, whileOp.getBody(), getLoc(s.getEndLoc()));
  return mlir::success();
}

mlir::LogicalResult CIRGenFunction::emitSwitchBody(const Stmt *s) {
  // It is rare but legal for the switch body to not be a compound stmt. e.g.,
  //
  //  switch(a)
  //    while(...) {
  //      case1
  //      ...
  //      case2
  //      ...
  //    }
  if (!isa<CompoundStmt>(s))
    return emitStmt(s, /*useCurrentScope=*/true);

  auto *compoundStmt = cast<CompoundStmt>(s);

  mlir::Block *switchBlock = builder.getBlock();
  for (auto *c : compoundStmt->body()) {
    if (auto *switchCase = dyn_cast<SwitchCase>(c)) {
      builder.setInsertionPointToEnd(switchBlock);
      // Reset the insertion point automatically, so that subsequent
      // statements can be attached to the region of the previously built
      // case op, keeping the generated `cir.switch` in the simple form.
      if (mlir::failed(
              emitSwitchCase(*switchCase, /*buildingTopLevelCase=*/true)))
        return mlir::failure();

      continue;
    }

    // Otherwise, just build the statements in the nearest case region.
    if (mlir::failed(emitStmt(c, /*useCurrentScope=*/!isa<CompoundStmt>(c))))
      return mlir::failure();
  }

  return mlir::success();
}

mlir::LogicalResult CIRGenFunction::emitSwitchStmt(const SwitchStmt &s) {
  // TODO: LLVM codegen does some early optimization to fold the condition and
  // only emit live cases. CIR should use MLIR to achieve similar things,
  // nothing to be done here.
  // if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue))...

  SwitchOp swop;
  auto switchStmtBuilder = [&]() -> mlir::LogicalResult {
    if (s.getInit())
      if (emitStmt(s.getInit(), /*useCurrentScope=*/true).failed())
        return mlir::failure();

    if (s.getConditionVariable())
      emitDecl(*s.getConditionVariable(), /*evaluateConditionDecl=*/true);

    mlir::Value condV = emitScalarExpr(s.getCond());

    // TODO: PGO and likelihood (e.g. PGO.haveRegionCounts())
    // TODO: if the switch has a condition wrapped by __builtin_unpredictable?

    mlir::LogicalResult res = mlir::success();
    swop = SwitchOp::create(
        builder, getLoc(s.getBeginLoc()), condV,
        /*switchBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc, mlir::OperationState &os) {
          curLexScope->setAsSwitch();

          condTypeStack.push_back(condV.getType());

          res = emitSwitchBody(s.getBody());

          condTypeStack.pop_back();
        });

    return res;
  };

  // The switch scope contains the full source range for SwitchStmt.
  mlir::Location scopeLoc = getLoc(s.getSourceRange());
  mlir::LogicalResult res = mlir::success();
  cir::ScopeOp::create(builder, scopeLoc, /*scopeBuilder=*/
                       [&](mlir::OpBuilder &b, mlir::Location loc) {
                         LexicalScope lexScope{*this, loc,
                                               builder.getInsertionBlock()};
                         res = switchStmtBuilder();
                       });

  llvm::SmallVector<CaseOp> cases;
  swop.collectCases(cases);
  for (auto caseOp : cases)
    terminateBody(builder, caseOp.getCaseRegion(), caseOp.getLoc());
  terminateBody(builder, swop.getBody(), swop.getLoc());

  swop.setAllEnumCasesCovered(s.isAllEnumCasesCovered());

  return res;
}

void CIRGenFunction::emitReturnOfRValue(mlir::Location loc, RValue rv,
                                        QualType ty) {
  if (rv.isScalar()) {
    builder.createStore(loc, rv.getValue(), returnValue);
  } else if (rv.isAggregate()) {
    LValue dest = makeAddrLValue(returnValue, ty);
    LValue src = makeAddrLValue(rv.getAggregateAddress(), ty);
    emitAggregateCopy(dest, src, ty, getOverlapForReturnValue());
  } else {
    cgm.errorNYI(loc, "emitReturnOfRValue: complex return type");
  }
  mlir::Block *retBlock = curLexScope->getOrCreateRetBlock(*this, loc);
  cir::BrOp::create(builder, loc, retBlock);
  if (ehStack.stable_begin() != currentCleanupStackDepth)
    cgm.errorNYI(loc, "return of r-value with cleanup stack");
}