clang 22.0.0git
CIRGenExprScalar.cpp
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Emit Expr nodes with scalar CIR types as CIR code.
10//
11//===----------------------------------------------------------------------===//
12
14#include "CIRGenFunction.h"
15#include "CIRGenValue.h"
16
17#include "clang/AST/Expr.h"
21
22#include "mlir/IR/Location.h"
23#include "mlir/IR/Value.h"
24
25#include <cassert>
26#include <utility>
27
28using namespace clang;
29using namespace clang::CIRGen;
30
31namespace {
32
33struct BinOpInfo {
34 mlir::Value lhs;
35 mlir::Value rhs;
36 SourceRange loc;
37 QualType fullType; // Type of operands and result
38 QualType compType; // Type used for computations. Element type
39 // for vectors, otherwise same as FullType.
40 BinaryOperator::Opcode opcode; // Opcode of BinOp to perform
41 FPOptions fpfeatures;
42 const Expr *e; // Entire expr, for reporting errors about unsupported cases. May not be a binop.
43
44 /// Check if the binop computes a division or a remainder.
45 bool isDivRemOp() const {
46 return opcode == BO_Div || opcode == BO_Rem || opcode == BO_DivAssign ||
47 opcode == BO_RemAssign;
48 }
49
50 /// Check if the binop can result in integer overflow.
51 bool mayHaveIntegerOverflow() const {
52 // Without constant input, we can't rule out overflow.
53 auto lhsci = lhs.getDefiningOp<cir::ConstantOp>();
54 auto rhsci = rhs.getDefiningOp<cir::ConstantOp>();
55 if (!lhsci || !rhsci)
56 return true;
57
59 // TODO(cir): For now we just assume that we might overflow
60 return true;
61 }
62
63 /// Check if at least one operand is a fixed point type. In such cases,
64 /// this operation did not follow the usual arithmetic conversions and the
65 /// two operands might not have the same type.
66 bool isFixedPointOp() const {
67 // We cannot simply check the result type since comparison operations
68 // return an int.
69 if (const auto *binOp = llvm::dyn_cast<BinaryOperator>(e)) {
70 QualType lhstype = binOp->getLHS()->getType();
71 QualType rhstype = binOp->getRHS()->getType();
72 return lhstype->isFixedPointType() || rhstype->isFixedPointType();
73 }
74 if (const auto *unop = llvm::dyn_cast<UnaryOperator>(e))
75 return unop->getSubExpr()->getType()->isFixedPointType();
76 return false;
77 }
78};
79
80class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
81 CIRGenFunction &cgf;
82 CIRGenBuilderTy &builder;
83 // Unlike classic codegen, we set this to false or use std::exchange to read
84 // the value, instead of calling TestAndClearIgnoreResultAssign, to make it
85 // explicit when the value is used.
86 bool ignoreResultAssign;
87
88public:
89 ScalarExprEmitter(CIRGenFunction &cgf, CIRGenBuilderTy &builder,
90 bool ignoreResultAssign = false)
91 : cgf(cgf), builder(builder), ignoreResultAssign(ignoreResultAssign) {}
92
93 //===--------------------------------------------------------------------===//
94 // Utilities
95 //===--------------------------------------------------------------------===//
96 mlir::Type convertType(QualType ty) { return cgf.convertType(ty); }
97
98 mlir::Value emitComplexToScalarConversion(mlir::Location loc,
99 mlir::Value value, CastKind kind,
100 QualType destTy);
101
102 mlir::Value emitNullValue(QualType ty, mlir::Location loc) {
103 return cgf.cgm.emitNullConstant(ty, loc);
104 }
105
106 mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType) {
107 return builder.createFloatingCast(result, cgf.convertType(promotionType));
108 }
109
110 mlir::Value emitUnPromotedValue(mlir::Value result, QualType exprType) {
111 return builder.createFloatingCast(result, cgf.convertType(exprType));
112 }
113
114 mlir::Value emitPromoted(const Expr *e, QualType promotionType);
115
116 mlir::Value maybePromoteBoolResult(mlir::Value value,
117 mlir::Type dstTy) const {
118 if (mlir::isa<cir::IntType>(dstTy))
119 return builder.createBoolToInt(value, dstTy);
120 if (mlir::isa<cir::BoolType>(dstTy))
121 return value;
122 llvm_unreachable("Can only promote integer or boolean types");
123 }
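  // Illustrative note: in C, relational and logical operators have type int,
  // while the comparison and logical-operator lowerings in this file produce
  // !cir.bool, so results frequently pass through the bool_to_int cast above.
  // For example, `int r = (a < b);` needs that widening, whereas the same
  // expression in C++ has type bool and is returned unchanged.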
124
125 //===--------------------------------------------------------------------===//
126 // Visitor Methods
127 //===--------------------------------------------------------------------===//
128
129 mlir::Value Visit(Expr *e) {
130 return StmtVisitor<ScalarExprEmitter, mlir::Value>::Visit(e);
131 }
132
133 mlir::Value VisitStmt(Stmt *s) {
134 llvm_unreachable("Statement passed to ScalarExprEmitter");
135 }
136
137 mlir::Value VisitExpr(Expr *e) {
138 cgf.getCIRGenModule().errorNYI(
139 e->getSourceRange(), "scalar expression kind: ", e->getStmtClassName());
140 return {};
141 }
142
143 mlir::Value VisitConstantExpr(ConstantExpr *e) {
144 cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: constant expr");
145 return {};
146 }
147
148 mlir::Value VisitPackIndexingExpr(PackIndexingExpr *e) {
149 return Visit(e->getSelectedExpr());
150 }
151
152 mlir::Value VisitParenExpr(ParenExpr *pe) { return Visit(pe->getSubExpr()); }
153
154 mlir::Value VisitGenericSelectionExpr(GenericSelectionExpr *ge) {
155 return Visit(ge->getResultExpr());
156 }
157
158 /// Emits the address of the l-value, then loads and returns the result.
159 mlir::Value emitLoadOfLValue(const Expr *e) {
160 LValue lv = cgf.emitLValue(e);
161 // FIXME: add something akin to EmitLValueAlignmentAssumption(E, V);
162 return cgf.emitLoadOfLValue(lv, e->getExprLoc()).getValue();
163 }
164
165 mlir::Value VisitCoawaitExpr(CoawaitExpr *s) {
166 return cgf.emitCoawaitExpr(*s).getValue();
167 }
168 mlir::Value VisitCoyieldExpr(CoyieldExpr *e) {
169 cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: coyield");
170 return {};
171 }
172 mlir::Value VisitUnaryCoawait(const UnaryOperator *e) {
173 cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: unary coawait");
174 return {};
175 }
176
177 mlir::Value emitLoadOfLValue(LValue lv, SourceLocation loc) {
178 return cgf.emitLoadOfLValue(lv, loc).getValue();
179 }
180
181 // l-values
182 mlir::Value VisitDeclRefExpr(DeclRefExpr *e) {
183 if (CIRGenFunction::ConstantEmission constant = cgf.tryEmitAsConstant(e))
184 return cgf.emitScalarConstant(constant, e);
185
186 return emitLoadOfLValue(e);
187 }
188
189 mlir::Value VisitAddrLabelExpr(const AddrLabelExpr *e) {
190 auto func = cast<cir::FuncOp>(cgf.curFn);
191 cir::BlockAddrInfoAttr blockInfoAttr = cir::BlockAddrInfoAttr::get(
192 &cgf.getMLIRContext(), func.getSymName(), e->getLabel()->getName());
193 cir::BlockAddressOp blockAddressOp = cir::BlockAddressOp::create(
194 builder, cgf.getLoc(e->getSourceRange()), cgf.convertType(e->getType()),
195 blockInfoAttr);
196 cir::LabelOp resolvedLabel = cgf.cgm.lookupBlockAddressInfo(blockInfoAttr);
197 if (!resolvedLabel) {
198 cgf.cgm.mapUnresolvedBlockAddress(blockAddressOp);
199 // Still add the op to maintain insertion order; it will be resolved in
200 // resolveBlockAddresses.
201 cgf.cgm.mapResolvedBlockAddress(blockAddressOp, nullptr);
202 } else {
203 cgf.cgm.mapResolvedBlockAddress(blockAddressOp, resolvedLabel);
204 }
205 cgf.instantiateIndirectGotoBlock();
206 return blockAddressOp;
207 }
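  // For context: AddrLabelExpr models the GNU address-of-label extension,
  // e.g. `void *p = &&my_label;`, typically paired with computed goto
  // (`goto *p;`). A label's address can be taken before the label itself has
  // been emitted, hence the unresolved/resolved bookkeeping above, and taking
  // any label address forces the indirect-goto dispatch block into existence.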
208
209 mlir::Value VisitIntegerLiteral(const IntegerLiteral *e) {
210 mlir::Type type = cgf.convertType(e->getType());
211 return cir::ConstantOp::create(builder, cgf.getLoc(e->getExprLoc()),
212 cir::IntAttr::get(type, e->getValue()));
213 }
214
215 mlir::Value VisitFixedPointLiteral(const FixedPointLiteral *e) {
216 cgf.cgm.errorNYI(e->getSourceRange(),
217 "ScalarExprEmitter: fixed point literal");
218 return {};
219 }
220
221 mlir::Value VisitFloatingLiteral(const FloatingLiteral *e) {
222 mlir::Type type = cgf.convertType(e->getType());
223 assert(mlir::isa<cir::FPTypeInterface>(type) &&
224 "expect floating-point type");
225 return cir::ConstantOp::create(builder, cgf.getLoc(e->getExprLoc()),
226 cir::FPAttr::get(type, e->getValue()));
227 }
228
229 mlir::Value VisitCharacterLiteral(const CharacterLiteral *e) {
230 mlir::Type ty = cgf.convertType(e->getType());
231 auto init = cir::IntAttr::get(ty, e->getValue());
232 return cir::ConstantOp::create(builder, cgf.getLoc(e->getExprLoc()), init);
233 }
234
235 mlir::Value VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *e) {
236 return builder.getBool(e->getValue(), cgf.getLoc(e->getExprLoc()));
237 }
238
239 mlir::Value VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *e) {
240 if (e->getType()->isVoidType())
241 return {};
242
243 return emitNullValue(e->getType(), cgf.getLoc(e->getSourceRange()));
244 }
245
246 mlir::Value VisitGNUNullExpr(const GNUNullExpr *e) {
247 return emitNullValue(e->getType(), cgf.getLoc(e->getSourceRange()));
248 }
249
250 mlir::Value VisitOffsetOfExpr(OffsetOfExpr *e);
251
252 mlir::Value VisitSizeOfPackExpr(SizeOfPackExpr *e) {
253 cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: size of pack");
254 return {};
255 }
256 mlir::Value VisitPseudoObjectExpr(PseudoObjectExpr *e) {
257 cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: pseudo object");
258 return {};
259 }
260 mlir::Value VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *e) {
261 cgf.cgm.errorNYI(e->getSourceRange(),
262 "ScalarExprEmitter: sycl unique stable name");
263 return {};
264 }
265 mlir::Value VisitEmbedExpr(EmbedExpr *e) {
266 assert(e->getDataElementCount() == 1);
267 auto it = e->begin();
268 llvm::APInt value = (*it)->getValue();
269 return builder.getConstInt(cgf.getLoc(e->getExprLoc()), value,
271 }
272 mlir::Value VisitOpaqueValueExpr(OpaqueValueExpr *e) {
273 if (e->isGLValue())
274 return emitLoadOfLValue(cgf.getOrCreateOpaqueLValueMapping(e),
275 e->getExprLoc());
276
277 // Otherwise, assume the mapping is the scalar directly.
278 return cgf.getOrCreateOpaqueRValueMapping(e).getValue();
279 }
280
281 mlir::Value VisitObjCSelectorExpr(ObjCSelectorExpr *e) {
282 cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: objc selector");
283 return {};
284 }
285 mlir::Value VisitObjCProtocolExpr(ObjCProtocolExpr *e) {
286 cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: objc protocol");
287 return {};
288 }
289 mlir::Value VisitObjCIVarRefExpr(ObjCIvarRefExpr *e) {
290 cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: objc ivar ref");
291 return {};
292 }
293 mlir::Value VisitObjCMessageExpr(ObjCMessageExpr *e) {
294 cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: objc message");
295 return {};
296 }
297 mlir::Value VisitObjCIsaExpr(ObjCIsaExpr *e) {
298 cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: objc isa");
299 return {};
300 }
301 mlir::Value VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *e) {
302 cgf.cgm.errorNYI(e->getSourceRange(),
303 "ScalarExprEmitter: objc availability check");
304 return {};
305 }
306
307 mlir::Value VisitMatrixSubscriptExpr(MatrixSubscriptExpr *e) {
308 cgf.cgm.errorNYI(e->getSourceRange(),
309 "ScalarExprEmitter: matrix subscript");
310 return {};
311 }
312
313 mlir::Value VisitCastExpr(CastExpr *e);
314 mlir::Value VisitCallExpr(const CallExpr *e);
315
316 mlir::Value VisitStmtExpr(StmtExpr *e) {
317 CIRGenFunction::StmtExprEvaluation eval(cgf);
318 if (e->getType()->isVoidType()) {
319 (void)cgf.emitCompoundStmt(*e->getSubStmt());
320 return {};
321 }
322
323 Address retAlloca =
324 cgf.createMemTemp(e->getType(), cgf.getLoc(e->getSourceRange()));
325 (void)cgf.emitCompoundStmt(*e->getSubStmt(), &retAlloca);
326
327 return cgf.emitLoadOfScalar(cgf.makeAddrLValue(retAlloca, e->getType()),
328 e->getExprLoc());
329 }
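  // For context: StmtExpr is the GNU statement-expression extension, e.g.
  // `int x = ({ int t = f(); t + 1; });`. For non-void types, the value of
  // the last expression statement is written into the temporary created above
  // and then loaded back as the scalar result.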
330
331 mlir::Value VisitArraySubscriptExpr(ArraySubscriptExpr *e) {
332 ignoreResultAssign = false;
333
334 if (e->getBase()->getType()->isVectorType()) {
336
337 const mlir::Location loc = cgf.getLoc(e->getSourceRange());
338 const mlir::Value vecValue = Visit(e->getBase());
339 const mlir::Value indexValue = Visit(e->getIdx());
340 return cir::VecExtractOp::create(cgf.builder, loc, vecValue, indexValue);
341 }
342 // Just load the lvalue formed by the subscript expression.
343 return emitLoadOfLValue(e);
344 }
345
346 mlir::Value VisitShuffleVectorExpr(ShuffleVectorExpr *e) {
347 if (e->getNumSubExprs() == 2) {
348 // The undocumented form of __builtin_shufflevector.
349 mlir::Value inputVec = Visit(e->getExpr(0));
350 mlir::Value indexVec = Visit(e->getExpr(1));
351 return cir::VecShuffleDynamicOp::create(
352 cgf.builder, cgf.getLoc(e->getSourceRange()), inputVec, indexVec);
353 }
354
355 mlir::Value vec1 = Visit(e->getExpr(0));
356 mlir::Value vec2 = Visit(e->getExpr(1));
357
358 // The documented form of __builtin_shufflevector, where the indices are
359 // a variable number of integer constants. The constants will be stored
360 // in an ArrayAttr.
361 SmallVector<mlir::Attribute, 8> indices;
362 for (unsigned i = 2; i < e->getNumSubExprs(); ++i) {
363 indices.push_back(
364 cir::IntAttr::get(cgf.builder.getSInt64Ty(),
365 e->getExpr(i)
366 ->EvaluateKnownConstInt(cgf.getContext())
367 .getSExtValue()));
368 }
369
370 return cir::VecShuffleOp::create(cgf.builder,
371 cgf.getLoc(e->getSourceRange()),
372 cgf.convertType(e->getType()), vec1, vec2,
373 cgf.builder.getArrayAttr(indices));
374 }
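  // For reference, the two forms handled above look roughly like:
  //   __builtin_shufflevector(v, maskVec)          // 2 args, runtime indices
  //   __builtin_shufflevector(v1, v2, 0, 4, 1, 5)  // constant indices
  // The two-argument form becomes a dynamic shuffle; the constant-index form
  // becomes a vector shuffle whose indices are folded into an ArrayAttr.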
375
376 mlir::Value VisitConvertVectorExpr(ConvertVectorExpr *e) {
377 // __builtin_convertvector is an element-wise cast, and is implemented as a
378 // regular cast. The back end handles casts of vectors correctly.
379 return emitScalarConversion(Visit(e->getSrcExpr()),
380 e->getSrcExpr()->getType(), e->getType(),
381 e->getSourceRange().getBegin());
382 }
383
384 mlir::Value VisitExtVectorElementExpr(Expr *e) { return emitLoadOfLValue(e); }
385
386 mlir::Value VisitMemberExpr(MemberExpr *e);
387
388 mlir::Value VisitCompoundLiteralExpr(CompoundLiteralExpr *e) {
389 return emitLoadOfLValue(e);
390 }
391
392 mlir::Value VisitInitListExpr(InitListExpr *e);
393
394 mlir::Value VisitArrayInitIndexExpr(ArrayInitIndexExpr *e) {
395 cgf.cgm.errorNYI(e->getSourceRange(),
396 "ScalarExprEmitter: array init index");
397 return {};
398 }
399
400 mlir::Value VisitImplicitValueInitExpr(const ImplicitValueInitExpr *e) {
401 cgf.cgm.errorNYI(e->getSourceRange(),
402 "ScalarExprEmitter: implicit value init");
403 return {};
404 }
405
406 mlir::Value VisitExplicitCastExpr(ExplicitCastExpr *e) {
407 return VisitCastExpr(e);
408 }
409
410 mlir::Value VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *e) {
411 return cgf.cgm.emitNullConstant(e->getType(),
412 cgf.getLoc(e->getSourceRange()));
413 }
414
415 /// Perform a pointer to boolean conversion.
416 mlir::Value emitPointerToBoolConversion(mlir::Value v, QualType qt) {
417 // TODO(cir): comparing the ptr to null is done when lowering CIR to LLVM.
418 // We might want to have a separate pass for these types of conversions.
419 return cgf.getBuilder().createPtrToBoolCast(v);
420 }
421
422 mlir::Value emitFloatToBoolConversion(mlir::Value src, mlir::Location loc) {
423 cir::BoolType boolTy = builder.getBoolTy();
424 return cir::CastOp::create(builder, loc, boolTy,
425 cir::CastKind::float_to_bool, src);
426 }
427
428 mlir::Value emitIntToBoolConversion(mlir::Value srcVal, mlir::Location loc) {
429 // Because of the type rules of C, we often end up computing a
430 // logical value, then zero extending it to int, then wanting it
431 // as a logical value again.
432 // TODO: optimize this common case here or leave it for later
433 // CIR passes?
434 cir::BoolType boolTy = builder.getBoolTy();
435 return cir::CastOp::create(builder, loc, boolTy, cir::CastKind::int_to_bool,
436 srcVal);
437 }
438
439 /// Convert the specified expression value to a boolean (!cir.bool) truth
440 /// value. This is equivalent to "Val != 0".
441 mlir::Value emitConversionToBool(mlir::Value src, QualType srcType,
442 mlir::Location loc) {
443 assert(srcType.isCanonical() && "EmitScalarConversion strips typedefs");
444
445 if (srcType->isRealFloatingType())
446 return emitFloatToBoolConversion(src, loc);
447
448 if (llvm::isa<MemberPointerType>(srcType)) {
449 cgf.getCIRGenModule().errorNYI(loc, "member pointer to bool conversion");
450 return builder.getFalse(loc);
451 }
452
453 if (srcType->isIntegerType())
454 return emitIntToBoolConversion(src, loc);
455
456 assert(::mlir::isa<cir::PointerType>(src.getType()));
457 return emitPointerToBoolConversion(src, srcType);
458 }
459
460 // Emit a conversion from the specified type to the specified destination
461 // type, both of which are CIR scalar types.
462 struct ScalarConversionOpts {
463 bool treatBooleanAsSigned;
464 bool emitImplicitIntegerTruncationChecks;
465 bool emitImplicitIntegerSignChangeChecks;
466
467 ScalarConversionOpts()
468 : treatBooleanAsSigned(false),
469 emitImplicitIntegerTruncationChecks(false),
470 emitImplicitIntegerSignChangeChecks(false) {}
471
472 ScalarConversionOpts(clang::SanitizerSet sanOpts)
473 : treatBooleanAsSigned(false),
474 emitImplicitIntegerTruncationChecks(
475 sanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
476 emitImplicitIntegerSignChangeChecks(
477 sanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
478 };
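  // Note: these flags roughly mirror the UBSan implicit-conversion checks
  // (e.g. -fsanitize=implicit-integer-truncation and
  // -fsanitize=implicit-integer-sign-change). As the errorNYI calls in
  // emitScalarConversion indicate, the checks themselves are not emitted in
  // CIR yet; the options only record whether they were requested.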
479
480 // Conversion from bool, integral, or floating-point to integral or
481 // floating-point. Conversions involving other types are handled elsewhere.
482 // Conversion to bool is handled elsewhere because that's a comparison against
483 // zero, not a simple cast. This handles both individual scalars and vectors.
484 mlir::Value emitScalarCast(mlir::Value src, QualType srcType,
485 QualType dstType, mlir::Type srcTy,
486 mlir::Type dstTy, ScalarConversionOpts opts) {
487 assert(!srcType->isMatrixType() && !dstType->isMatrixType() &&
488 "Internal error: matrix types not handled by this function.");
489 assert(!(mlir::isa<mlir::IntegerType>(srcTy) ||
490 mlir::isa<mlir::IntegerType>(dstTy)) &&
491 "Obsolete code. Don't use mlir::IntegerType with CIR.");
492
493 mlir::Type fullDstTy = dstTy;
494 if (mlir::isa<cir::VectorType>(srcTy) &&
495 mlir::isa<cir::VectorType>(dstTy)) {
496 // Use the element types of the vectors to figure out the CastKind.
497 srcTy = mlir::dyn_cast<cir::VectorType>(srcTy).getElementType();
498 dstTy = mlir::dyn_cast<cir::VectorType>(dstTy).getElementType();
499 }
500
501 std::optional<cir::CastKind> castKind;
502
503 if (mlir::isa<cir::BoolType>(srcTy)) {
504 if (opts.treatBooleanAsSigned)
505 cgf.getCIRGenModule().errorNYI("signed bool");
506 if (cgf.getBuilder().isInt(dstTy))
507 castKind = cir::CastKind::bool_to_int;
508 else if (mlir::isa<cir::FPTypeInterface>(dstTy))
509 castKind = cir::CastKind::bool_to_float;
510 else
511 llvm_unreachable("Internal error: Cast to unexpected type");
512 } else if (cgf.getBuilder().isInt(srcTy)) {
513 if (cgf.getBuilder().isInt(dstTy))
514 castKind = cir::CastKind::integral;
515 else if (mlir::isa<cir::FPTypeInterface>(dstTy))
516 castKind = cir::CastKind::int_to_float;
517 else
518 llvm_unreachable("Internal error: Cast to unexpected type");
519 } else if (mlir::isa<cir::FPTypeInterface>(srcTy)) {
520 if (cgf.getBuilder().isInt(dstTy)) {
521 // If we can't recognize overflow as undefined behavior, assume that
522 // overflow saturates. This protects against normal optimizations if we
523 // are compiling with non-standard FP semantics.
524 if (!cgf.cgm.getCodeGenOpts().StrictFloatCastOverflow)
525 cgf.getCIRGenModule().errorNYI("strict float cast overflow");
527 castKind = cir::CastKind::float_to_int;
528 } else if (mlir::isa<cir::FPTypeInterface>(dstTy)) {
529 // TODO: split this to createFPExt/createFPTrunc
530 return builder.createFloatingCast(src, fullDstTy);
531 } else {
532 llvm_unreachable("Internal error: Cast to unexpected type");
533 }
534 } else {
535 llvm_unreachable("Internal error: Cast from unexpected type");
536 }
537
538 assert(castKind.has_value() && "Internal error: CastKind not set.");
539 return cir::CastOp::create(builder, src.getLoc(), fullDstTy, *castKind,
540 src);
541 }
542
543 mlir::Value
544 VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *e) {
545 return Visit(e->getReplacement());
546 }
547
548 mlir::Value VisitVAArgExpr(VAArgExpr *ve) {
549 QualType ty = ve->getType();
550
551 if (ty->isVariablyModifiedType()) {
552 cgf.cgm.errorNYI(ve->getSourceRange(),
553 "variably modified types in varargs");
554 }
555
556 return cgf.emitVAArg(ve);
557 }
558
559 mlir::Value VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *e) {
560 return Visit(e->getSemanticForm());
561 }
562
563 mlir::Value VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *e);
564 mlir::Value
565 VisitAbstractConditionalOperator(const AbstractConditionalOperator *e);
566
567 // Unary Operators.
568 mlir::Value VisitUnaryPostDec(const UnaryOperator *e) {
569 LValue lv = cgf.emitLValue(e->getSubExpr());
570 return emitScalarPrePostIncDec(e, lv, cir::UnaryOpKind::Dec, false);
571 }
572 mlir::Value VisitUnaryPostInc(const UnaryOperator *e) {
573 LValue lv = cgf.emitLValue(e->getSubExpr());
574 return emitScalarPrePostIncDec(e, lv, cir::UnaryOpKind::Inc, false);
575 }
576 mlir::Value VisitUnaryPreDec(const UnaryOperator *e) {
577 LValue lv = cgf.emitLValue(e->getSubExpr());
578 return emitScalarPrePostIncDec(e, lv, cir::UnaryOpKind::Dec, true);
579 }
580 mlir::Value VisitUnaryPreInc(const UnaryOperator *e) {
581 LValue lv = cgf.emitLValue(e->getSubExpr());
582 return emitScalarPrePostIncDec(e, lv, cir::UnaryOpKind::Inc, true);
583 }
584 mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv,
585 cir::UnaryOpKind kind, bool isPre) {
586 if (cgf.getLangOpts().OpenMP)
587 cgf.cgm.errorNYI(e->getSourceRange(), "inc/dec OpenMP");
588
589 QualType type = e->getSubExpr()->getType();
590
591 mlir::Value value;
592 mlir::Value input;
593
594 if (type->getAs<AtomicType>()) {
595 cgf.cgm.errorNYI(e->getSourceRange(), "Atomic inc/dec");
596 // TODO(cir): This is not correct, but it will produce reasonable code
597 // until atomic operations are implemented.
598 value = cgf.emitLoadOfLValue(lv, e->getExprLoc()).getValue();
599 input = value;
600 } else {
601 value = cgf.emitLoadOfLValue(lv, e->getExprLoc()).getValue();
602 input = value;
603 }
604
605 // NOTE: When possible, more frequent cases are handled first.
606
607 // Special case of integer increment that we have to check first: bool++.
608 // Due to promotion rules, we get:
609 // bool++ -> bool = bool + 1
610 // -> bool = (int)bool + 1
611 // -> bool = ((int)bool + 1 != 0)
612 // An interesting aspect of this is that increment is always true.
613 // Decrement does not have this property.
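 // For example, with `bool b = false; b++;` the promoted computation is
 // (int)false + 1 == 1, which is non-zero, so the stored value is true
 // regardless of the initial value. For `b--` the result is 0 or -1
 // depending on the input, so decrement cannot be folded the same way.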
614 if (kind == cir::UnaryOpKind::Inc && type->isBooleanType()) {
615 value = builder.getTrue(cgf.getLoc(e->getExprLoc()));
616 } else if (type->isIntegerType()) {
617 QualType promotedType;
618 [[maybe_unused]] bool canPerformLossyDemotionCheck = false;
619 if (cgf.getContext().isPromotableIntegerType(type)) {
620 promotedType = cgf.getContext().getPromotedIntegerType(type);
621 assert(promotedType != type && "Shouldn't promote to the same type.");
622 canPerformLossyDemotionCheck = true;
623 canPerformLossyDemotionCheck &=
624 cgf.getContext().getCanonicalType(type) !=
625 cgf.getContext().getCanonicalType(promotedType);
626 canPerformLossyDemotionCheck &=
627 type->isIntegerType() && promotedType->isIntegerType();
628
629 // TODO(cir): Currently, we store bitwidths in CIR types only for
630 // integers. This might also be required for other types.
631
632 assert(
633 (!canPerformLossyDemotionCheck ||
634 type->isSignedIntegerOrEnumerationType() ||
635 promotedType->isSignedIntegerOrEnumerationType() ||
636 mlir::cast<cir::IntType>(cgf.convertType(type)).getWidth() ==
637 mlir::cast<cir::IntType>(cgf.convertType(promotedType)).getWidth()) &&
638 "The following check expects that if we do promotion to different "
639 "underlying canonical type, at least one of the types (either "
640 "base or promoted) will be signed, or the bitwidths will match.");
641 }
642
644 if (e->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
645 value = emitIncDecConsiderOverflowBehavior(e, value, kind);
646 } else {
647 cir::UnaryOpKind kind =
648 e->isIncrementOp() ? cir::UnaryOpKind::Inc : cir::UnaryOpKind::Dec;
649 // NOTE(CIR): clang calls CreateAdd but folds this to a unary op
650 value = emitUnaryOp(e, kind, input, /*nsw=*/false);
651 }
652 } else if (const PointerType *ptr = type->getAs<PointerType>()) {
653 QualType type = ptr->getPointeeType();
654 if (cgf.getContext().getAsVariableArrayType(type)) {
655 // VLA types don't have constant size.
656 cgf.cgm.errorNYI(e->getSourceRange(), "Pointer arithmetic on VLA");
657 return {};
658 } else if (type->isFunctionType()) {
659 // Arithmetic on function pointers (!) is just +-1.
660 cgf.cgm.errorNYI(e->getSourceRange(),
661 "Pointer arithmetic on function pointer");
662 return {};
663 } else {
664 // For everything else, we can just do a simple increment.
665 mlir::Location loc = cgf.getLoc(e->getSourceRange());
666 CIRGenBuilderTy &builder = cgf.getBuilder();
667 int amount = kind == cir::UnaryOpKind::Inc ? 1 : -1;
668 mlir::Value amt = builder.getSInt32(amount, loc);
670 value = builder.createPtrStride(loc, value, amt);
671 }
672 } else if (type->isVectorType()) {
673 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec vector");
674 return {};
675 } else if (type->isRealFloatingType()) {
677
678 if (type->isHalfType() &&
679 !cgf.getContext().getLangOpts().NativeHalfType) {
680 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec half");
681 return {};
682 }
683
684 if (mlir::isa<cir::SingleType, cir::DoubleType>(value.getType())) {
685 // Create the inc/dec operation.
686 // NOTE(CIR): clang calls CreateAdd but folds this to a unary op
687 assert((kind == cir::UnaryOpKind::Inc ||
688 kind == cir::UnaryOpKind::Dec) && "Invalid UnaryOp kind");
689 value = emitUnaryOp(e, kind, value);
690 } else {
691 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec other fp type");
692 return {};
693 }
694 } else if (type->isFixedPointType()) {
695 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec other fixed point");
696 return {};
697 } else {
698 assert(type->castAs<ObjCObjectPointerType>());
699 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec ObjectiveC pointer");
700 return {};
701 }
702
703 CIRGenFunction::SourceLocRAIIObject sourceloc{
704 cgf, cgf.getLoc(e->getSourceRange())};
705
706 // Store the updated result through the lvalue
707 if (lv.isBitField())
708 return cgf.emitStoreThroughBitfieldLValue(RValue::get(value), lv);
709 else
710 cgf.emitStoreThroughLValue(RValue::get(value), lv);
711
712 // If this is a postinc, return the value read from memory, otherwise use
713 // the updated value.
714 return isPre ? value : input;
715 }
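  // For illustration: for `int *p; p++;` the pointer branch above loads p,
  // applies a pointer stride of +1 (or -1 for decrement), and stores the
  // result; the postfix form then returns the originally loaded pointer while
  // the prefix form returns the updated one, matching the comment above.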
716
717 mlir::Value emitIncDecConsiderOverflowBehavior(const UnaryOperator *e,
718 mlir::Value inVal,
719 cir::UnaryOpKind kind) {
720 assert((kind == cir::UnaryOpKind::Inc ||
721 kind == cir::UnaryOpKind::Dec) && "Invalid UnaryOp kind");
722 switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
723 case LangOptions::SOB_Defined:
724 return emitUnaryOp(e, kind, inVal, /*nsw=*/false);
725 case LangOptions::SOB_Undefined:
727 return emitUnaryOp(e, kind, inVal, /*nsw=*/true);
728 case LangOptions::SOB_Trapping:
729 if (!e->canOverflow())
730 return emitUnaryOp(e, kind, inVal, /*nsw=*/true);
731 cgf.cgm.errorNYI(e->getSourceRange(), "inc/dec overflow SOB_Trapping");
732 return {};
733 }
734 llvm_unreachable("Unexpected signed overflow behavior kind");
735 }
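  // For context: the signed-overflow behavior follows the usual driver flags,
  // roughly -fwrapv -> SOB_Defined (wrapping, no nsw), the default ->
  // SOB_Undefined (nsw set, so later passes may assume no overflow), and
  // -ftrapv -> SOB_Trapping, whose checked lowering is still NYI above.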
736
737 mlir::Value VisitUnaryAddrOf(const UnaryOperator *e) {
738 if (llvm::isa<MemberPointerType>(e->getType()))
739 return cgf.cgm.emitMemberPointerConstant(e);
740
741 return cgf.emitLValue(e->getSubExpr()).getPointer();
742 }
743
744 mlir::Value VisitUnaryDeref(const UnaryOperator *e) {
745 if (e->getType()->isVoidType())
746 return Visit(e->getSubExpr()); // the actual value should be unused
747 return emitLoadOfLValue(e);
748 }
749
750 mlir::Value VisitUnaryPlus(const UnaryOperator *e) {
751 QualType promotionType = getPromotionType(e->getSubExpr()->getType());
752 mlir::Value result =
753 emitUnaryPlusOrMinus(e, cir::UnaryOpKind::Plus, promotionType);
754 if (result && !promotionType.isNull())
755 return emitUnPromotedValue(result, e->getType());
756 return result;
757 }
758
759 mlir::Value VisitUnaryMinus(const UnaryOperator *e) {
760 QualType promotionType = getPromotionType(e->getSubExpr()->getType());
761 mlir::Value result =
762 emitUnaryPlusOrMinus(e, cir::UnaryOpKind::Minus, promotionType);
763 if (result && !promotionType.isNull())
764 return emitUnPromotedValue(result, e->getType());
765 return result;
766 }
767
768 mlir::Value emitUnaryPlusOrMinus(const UnaryOperator *e,
769 cir::UnaryOpKind kind,
770 QualType promotionType) {
771 ignoreResultAssign = false;
772 mlir::Value operand;
773 if (!promotionType.isNull())
774 operand = cgf.emitPromotedScalarExpr(e->getSubExpr(), promotionType);
775 else
776 operand = Visit(e->getSubExpr());
777
778 bool nsw =
779 kind == cir::UnaryOpKind::Minus && e->getType()->isSignedIntegerType();
780
781 // NOTE: LLVM codegen will lower this directly to either a FNeg
782 // or a Sub instruction. In CIR this will be handled later in LowerToLLVM.
783 return emitUnaryOp(e, kind, operand, nsw);
784 }
785
786 mlir::Value emitUnaryOp(const UnaryOperator *e, cir::UnaryOpKind kind,
787 mlir::Value input, bool nsw = false) {
788 return cir::UnaryOp::create(builder,
789 cgf.getLoc(e->getSourceRange().getBegin()),
790 input.getType(), kind, input, nsw);
791 }
792
793 mlir::Value VisitUnaryNot(const UnaryOperator *e) {
794 ignoreResultAssign = false;
795 mlir::Value op = Visit(e->getSubExpr());
796 return emitUnaryOp(e, cir::UnaryOpKind::Not, op);
797 }
798
799 mlir::Value VisitUnaryLNot(const UnaryOperator *e);
800
801 mlir::Value VisitUnaryReal(const UnaryOperator *e);
802 mlir::Value VisitUnaryImag(const UnaryOperator *e);
803 mlir::Value VisitRealImag(const UnaryOperator *e,
804 QualType promotionType = QualType());
805
806 mlir::Value VisitUnaryExtension(const UnaryOperator *e) {
807 return Visit(e->getSubExpr());
808 }
809
810 // C++
811 mlir::Value VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *e) {
812 cgf.cgm.errorNYI(e->getSourceRange(),
813 "ScalarExprEmitter: materialize temporary");
814 return {};
815 }
816 mlir::Value VisitSourceLocExpr(SourceLocExpr *e) {
817 ASTContext &ctx = cgf.getContext();
818 APValue evaluated =
819 e->EvaluateInContext(ctx, cgf.curSourceLocExprScope.getDefaultExpr());
820 mlir::Attribute attribute = ConstantEmitter(cgf).emitAbstract(
821 e->getLocation(), evaluated, e->getType());
822 mlir::TypedAttr typedAttr = mlir::cast<mlir::TypedAttr>(attribute);
823 return cir::ConstantOp::create(builder, cgf.getLoc(e->getExprLoc()),
824 typedAttr);
825 }
826 mlir::Value VisitCXXDefaultArgExpr(CXXDefaultArgExpr *dae) {
827 CIRGenFunction::CXXDefaultArgExprScope scope(cgf, dae);
828 return Visit(dae->getExpr());
829 }
830 mlir::Value VisitCXXDefaultInitExpr(CXXDefaultInitExpr *die) {
831 CIRGenFunction::CXXDefaultInitExprScope scope(cgf, die);
832 return Visit(die->getExpr());
833 }
834
835 mlir::Value VisitCXXThisExpr(CXXThisExpr *te) { return cgf.loadCXXThis(); }
836
837 mlir::Value VisitExprWithCleanups(ExprWithCleanups *e);
838 mlir::Value VisitCXXNewExpr(const CXXNewExpr *e) {
839 return cgf.emitCXXNewExpr(e);
840 }
841 mlir::Value VisitCXXDeleteExpr(const CXXDeleteExpr *e) {
842 cgf.emitCXXDeleteExpr(e);
843 return {};
844 }
845 mlir::Value VisitTypeTraitExpr(const TypeTraitExpr *e) {
846 mlir::Location loc = cgf.getLoc(e->getExprLoc());
847 if (e->isStoredAsBoolean())
848 return builder.getBool(e->getBoolValue(), loc);
849 cgf.cgm.errorNYI(e->getSourceRange(),
850 "ScalarExprEmitter: TypeTraitExpr stored as int");
851 return {};
852 }
853 mlir::Value
854 VisitConceptSpecializationExpr(const ConceptSpecializationExpr *e) {
855 return builder.getBool(e->isSatisfied(), cgf.getLoc(e->getExprLoc()));
856 }
857 mlir::Value VisitRequiresExpr(const RequiresExpr *e) {
858 return builder.getBool(e->isSatisfied(), cgf.getLoc(e->getExprLoc()));
859 }
860 mlir::Value VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *e) {
861 mlir::Type type = cgf.convertType(e->getType());
862 mlir::Location loc = cgf.getLoc(e->getExprLoc());
863 return builder.getConstInt(loc, type, e->getValue());
864 }
865 mlir::Value VisitExpressionTraitExpr(const ExpressionTraitExpr *e) {
866 return builder.getBool(e->getValue(), cgf.getLoc(e->getExprLoc()));
867 }
868 mlir::Value VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *e) {
869 cgf.cgm.errorNYI(e->getSourceRange(),
870 "ScalarExprEmitter: cxx pseudo destructor");
871 return {};
872 }
873 mlir::Value VisitCXXThrowExpr(const CXXThrowExpr *e) {
874 cgf.emitCXXThrowExpr(e);
875 return {};
876 }
877
878 mlir::Value VisitCXXNoexceptExpr(CXXNoexceptExpr *e) {
879 return builder.getBool(e->getValue(), cgf.getLoc(e->getExprLoc()));
880 }
881
882 /// Emit a conversion from the specified type to the specified destination
883 /// type, both of which are CIR scalar types.
884 /// TODO: do we need ScalarConversionOpts here? Should be done in another
885 /// pass.
886 mlir::Value
887 emitScalarConversion(mlir::Value src, QualType srcType, QualType dstType,
888 SourceLocation loc,
889 ScalarConversionOpts opts = ScalarConversionOpts()) {
890 // All conversions involving fixed point types should be handled by the
891 // emitFixedPoint family functions. This is done to prevent bloating up
892 // this function more, and although fixed point numbers are represented by
893 // integers, we do not want to follow any logic that assumes they should be
894 // treated as integers.
895 // TODO(leonardchan): When necessary, add another if statement checking for
896 // conversions to fixed point types from other types.
898 if (srcType->isFixedPointType() || dstType->isFixedPointType()) {
899 cgf.getCIRGenModule().errorNYI(loc, "fixed point conversions");
900 return {};
901 }
902
903 srcType = srcType.getCanonicalType();
904 dstType = dstType.getCanonicalType();
905 if (srcType == dstType) {
906 if (opts.emitImplicitIntegerSignChangeChecks)
907 cgf.getCIRGenModule().errorNYI(loc,
908 "implicit integer sign change checks");
909 return src;
910 }
911
912 if (dstType->isVoidType())
913 return {};
914
915 mlir::Type mlirSrcType = src.getType();
916
917 // Handle conversions to bool first, they are special: comparisons against
918 // 0.
919 if (dstType->isBooleanType())
920 return emitConversionToBool(src, srcType, cgf.getLoc(loc));
921
922 mlir::Type mlirDstType = cgf.convertType(dstType);
923
924 if (srcType->isHalfType() &&
925 !cgf.getContext().getLangOpts().NativeHalfType) {
926 // Cast to FP using the intrinsic if the half type itself isn't supported.
927 if (mlir::isa<cir::FPTypeInterface>(mlirDstType)) {
928 if (cgf.getContext().getTargetInfo().useFP16ConversionIntrinsics())
929 cgf.getCIRGenModule().errorNYI(loc,
930 "cast via llvm.convert.from.fp16");
931 } else {
932 // Cast to other types through float, using either the intrinsic or
933 // FPExt, depending on whether the half type itself is supported (as
934 // opposed to operations on half, available with NativeHalfType).
935 if (cgf.getContext().getTargetInfo().useFP16ConversionIntrinsics())
936 cgf.getCIRGenModule().errorNYI(loc,
937 "cast via llvm.convert.from.fp16");
938 // FIXME(cir): For now let's pretend we shouldn't use the conversion
939 // intrinsics and insert a cast here unconditionally.
940 src = builder.createCast(cgf.getLoc(loc), cir::CastKind::floating, src,
941 cgf.floatTy);
942 srcType = cgf.getContext().FloatTy;
943 mlirSrcType = cgf.floatTy;
944 }
945 }
946
947 // TODO(cir): LLVM codegen ignores conversions like int -> uint,
948 // is there anything to be done for CIR here?
949 if (mlirSrcType == mlirDstType) {
950 if (opts.emitImplicitIntegerSignChangeChecks)
951 cgf.getCIRGenModule().errorNYI(loc,
952 "implicit integer sign change checks");
953 return src;
954 }
955
956 // Handle pointer conversions next: pointers can only be converted to/from
957 // other pointers and integers. Check for pointer types in terms of CIR, as
958 // some native types (like Obj-C id) may map to a pointer type.
959 if (auto dstPT = dyn_cast<cir::PointerType>(mlirDstType)) {
960 cgf.getCIRGenModule().errorNYI(loc, "pointer casts");
961 return builder.getNullPtr(dstPT, src.getLoc());
962 }
963
964 if (isa<cir::PointerType>(mlirSrcType)) {
965 // Must be an ptr to int cast.
966 assert(isa<cir::IntType>(mlirDstType) && "not ptr->int?");
967 return builder.createPtrToInt(src, mlirDstType);
968 }
969
970 // A scalar can be splatted to an extended vector of the same element type
971 if (dstType->isExtVectorType() && !srcType->isVectorType()) {
972 // Sema should add casts to make sure that the source expression's type
973 // is the same as the vector's element type (sans qualifiers)
974 assert(dstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
975 srcType.getTypePtr() &&
976 "Splatted expr doesn't match with vector element type?");
977
978 cgf.getCIRGenModule().errorNYI(loc, "vector splatting");
979 return {};
980 }
981
982 if (srcType->isMatrixType() && dstType->isMatrixType()) {
983 cgf.getCIRGenModule().errorNYI(loc,
984 "matrix type to matrix type conversion");
985 return {};
986 }
987 assert(!srcType->isMatrixType() && !dstType->isMatrixType() &&
988 "Internal error: conversion between matrix type and scalar type");
989
990 // Finally, we have the arithmetic types or vectors of arithmetic types.
991 mlir::Value res = nullptr;
992 mlir::Type resTy = mlirDstType;
993
994 res = emitScalarCast(src, srcType, dstType, mlirSrcType, mlirDstType, opts);
995
996 if (mlirDstType != resTy) {
997 if (cgf.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
998 cgf.getCIRGenModule().errorNYI(loc, "cast via llvm.convert.to.fp16");
999 }
1000 // FIXME(cir): For now we never use FP16 conversion intrinsics even if
1001 // required by the target. Change that once this is implemented
1002 res = builder.createCast(cgf.getLoc(loc), cir::CastKind::floating, res,
1003 resTy);
1004 }
1005
1006 if (opts.emitImplicitIntegerTruncationChecks)
1007 cgf.getCIRGenModule().errorNYI(loc, "implicit integer truncation checks");
1008
1009 if (opts.emitImplicitIntegerSignChangeChecks)
1010 cgf.getCIRGenModule().errorNYI(loc,
1011 "implicit integer sign change checks");
1012
1013 return res;
1014 }
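  // A small worked example of the flow above: for `double d = someInt;` the
  // source and destination differ, the destination is neither bool nor a
  // pointer, so control reaches emitScalarCast, which picks the int_to_float
  // cast kind. A _Bool/bool destination instead takes the early
  // emitConversionToBool path, i.e. a comparison against zero rather than a
  // plain cast.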
1015
1016 BinOpInfo emitBinOps(const BinaryOperator *e,
1017 QualType promotionType = QualType()) {
1018 ignoreResultAssign = false;
1019 BinOpInfo result;
1020 result.lhs = cgf.emitPromotedScalarExpr(e->getLHS(), promotionType);
1021 result.rhs = cgf.emitPromotedScalarExpr(e->getRHS(), promotionType);
1022 if (!promotionType.isNull())
1023 result.fullType = promotionType;
1024 else
1025 result.fullType = e->getType();
1026 result.compType = result.fullType;
1027 if (const auto *vecType = dyn_cast_or_null<VectorType>(result.fullType)) {
1028 result.compType = vecType->getElementType();
1029 }
1030 result.opcode = e->getOpcode();
1031 result.loc = e->getSourceRange();
1032 // TODO(cir): Result.FPFeatures
1034 result.e = e;
1035 return result;
1036 }
1037
1038 mlir::Value emitMul(const BinOpInfo &ops);
1039 mlir::Value emitDiv(const BinOpInfo &ops);
1040 mlir::Value emitRem(const BinOpInfo &ops);
1041 mlir::Value emitAdd(const BinOpInfo &ops);
1042 mlir::Value emitSub(const BinOpInfo &ops);
1043 mlir::Value emitShl(const BinOpInfo &ops);
1044 mlir::Value emitShr(const BinOpInfo &ops);
1045 mlir::Value emitAnd(const BinOpInfo &ops);
1046 mlir::Value emitXor(const BinOpInfo &ops);
1047 mlir::Value emitOr(const BinOpInfo &ops);
1048
1049 LValue emitCompoundAssignLValue(
1050 const CompoundAssignOperator *e,
1051 mlir::Value (ScalarExprEmitter::*f)(const BinOpInfo &),
1052 mlir::Value &result);
1053 mlir::Value
1054 emitCompoundAssign(const CompoundAssignOperator *e,
1055 mlir::Value (ScalarExprEmitter::*f)(const BinOpInfo &));
1056
1057 // TODO(cir): Candidate to be in a common AST helper between CIR and LLVM
1058 // codegen.
1059 QualType getPromotionType(QualType ty) {
1060 const clang::ASTContext &ctx = cgf.getContext();
1061 if (auto *complexTy = ty->getAs<ComplexType>()) {
1062 QualType elementTy = complexTy->getElementType();
1063 if (elementTy.UseExcessPrecision(ctx))
1064 return ctx.getComplexType(ctx.FloatTy);
1065 }
1066
1067 if (ty.UseExcessPrecision(cgf.getContext())) {
1068 if (auto *vt = ty->getAs<VectorType>()) {
1069 unsigned numElements = vt->getNumElements();
1070 return ctx.getVectorType(ctx.FloatTy, numElements, vt->getVectorKind());
1071 }
1072 return cgf.getContext().FloatTy;
1073 }
1074
1075 return QualType();
1076 }
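  // For context: excess-precision promotion typically applies to _Float16
  // (and __bf16) arithmetic on targets that evaluate it in float. For
  // instance, `_Float16 a, b, c; a = b + c;` may be computed in float, with
  // only the final result truncated back, which is what emitPromotedValue and
  // emitUnPromotedValue implement around the individual operations.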
1077
1078// Binary operators and binary compound assignment operators.
1079#define HANDLEBINOP(OP) \
1080 mlir::Value VisitBin##OP(const BinaryOperator *e) { \
1081 QualType promotionTy = getPromotionType(e->getType()); \
1082 auto result = emit##OP(emitBinOps(e, promotionTy)); \
1083 if (result && !promotionTy.isNull()) \
1084 result = emitUnPromotedValue(result, e->getType()); \
1085 return result; \
1086 } \
1087 mlir::Value VisitBin##OP##Assign(const CompoundAssignOperator *e) { \
1088 return emitCompoundAssign(e, &ScalarExprEmitter::emit##OP); \
1089 }
1090
1091 HANDLEBINOP(Mul)
1092 HANDLEBINOP(Div)
1093 HANDLEBINOP(Rem)
1094 HANDLEBINOP(Add)
1095 HANDLEBINOP(Sub)
1096 HANDLEBINOP(Shl)
1097 HANDLEBINOP(Shr)
1098 HANDLEBINOP(And)
1099 HANDLEBINOP(Xor)
1100 HANDLEBINOP(Or)
1101#undef HANDLEBINOP
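  // Spelled out for a single operator, HANDLEBINOP(Add) above expands to
  // roughly the following pair of visitors:
  //
  //   mlir::Value VisitBinAdd(const BinaryOperator *e) {
  //     QualType promotionTy = getPromotionType(e->getType());
  //     auto result = emitAdd(emitBinOps(e, promotionTy));
  //     if (result && !promotionTy.isNull())
  //       result = emitUnPromotedValue(result, e->getType());
  //     return result;
  //   }
  //   mlir::Value VisitBinAddAssign(const CompoundAssignOperator *e) {
  //     return emitCompoundAssign(e, &ScalarExprEmitter::emitAdd);
  //   }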
1102
1103 mlir::Value emitCmp(const BinaryOperator *e) {
1104 ignoreResultAssign = false;
1105 const mlir::Location loc = cgf.getLoc(e->getExprLoc());
1106 mlir::Value result;
1107 QualType lhsTy = e->getLHS()->getType();
1108 QualType rhsTy = e->getRHS()->getType();
1109
1110 auto clangCmpToCIRCmp =
1111 [](clang::BinaryOperatorKind clangCmp) -> cir::CmpOpKind {
1112 switch (clangCmp) {
1113 case BO_LT:
1114 return cir::CmpOpKind::lt;
1115 case BO_GT:
1116 return cir::CmpOpKind::gt;
1117 case BO_LE:
1118 return cir::CmpOpKind::le;
1119 case BO_GE:
1120 return cir::CmpOpKind::ge;
1121 case BO_EQ:
1122 return cir::CmpOpKind::eq;
1123 case BO_NE:
1124 return cir::CmpOpKind::ne;
1125 default:
1126 llvm_unreachable("unsupported comparison kind for cir.cmp");
1127 }
1128 };
1129
1130 cir::CmpOpKind kind = clangCmpToCIRCmp(e->getOpcode());
1131 if (lhsTy->getAs<MemberPointerType>()) {
1133 assert(e->getOpcode() == BO_EQ || e->getOpcode() == BO_NE);
1134 mlir::Value lhs = cgf.emitScalarExpr(e->getLHS());
1135 mlir::Value rhs = cgf.emitScalarExpr(e->getRHS());
1136 result = builder.createCompare(loc, kind, lhs, rhs);
1137 } else if (!lhsTy->isAnyComplexType() && !rhsTy->isAnyComplexType()) {
1138 BinOpInfo boInfo = emitBinOps(e);
1139 mlir::Value lhs = boInfo.lhs;
1140 mlir::Value rhs = boInfo.rhs;
1141
1142 if (lhsTy->isVectorType()) {
1143 if (!e->getType()->isVectorType()) {
1144 // If AltiVec, the comparison results in a numeric type, so we use
1145 // intrinsics comparing vectors and giving 0 or 1 as a result
1146 cgf.cgm.errorNYI(loc, "AltiVec comparison");
1147 } else {
1148 // Other kinds of vectors. Element-wise comparison returning
1149 // a vector.
1150 result = cir::VecCmpOp::create(builder, cgf.getLoc(boInfo.loc),
1151 cgf.convertType(boInfo.fullType), kind,
1152 boInfo.lhs, boInfo.rhs);
1153 }
1154 } else if (boInfo.isFixedPointOp()) {
1156 cgf.cgm.errorNYI(loc, "fixed point comparisons");
1157 result = builder.getBool(false, loc);
1158 } else {
1159 // integers and pointers
1160 if (cgf.cgm.getCodeGenOpts().StrictVTablePointers &&
1161 mlir::isa<cir::PointerType>(lhs.getType()) &&
1162 mlir::isa<cir::PointerType>(rhs.getType())) {
1163 cgf.cgm.errorNYI(loc, "strict vtable pointer comparisons");
1164 }
1165
1166 cir::CmpOpKind kind = clangCmpToCIRCmp(e->getOpcode());
1167 result = builder.createCompare(loc, kind, lhs, rhs);
1168 }
1169 } else {
1170 // Complex Comparison: can only be an equality comparison.
1171 assert(e->getOpcode() == BO_EQ || e->getOpcode() == BO_NE);
1172
1173 BinOpInfo boInfo = emitBinOps(e);
1174 result = cir::CmpOp::create(builder, loc, kind, boInfo.lhs, boInfo.rhs);
1175 }
1176
1177 return emitScalarConversion(result, cgf.getContext().BoolTy, e->getType(),
1178 e->getExprLoc());
1179 }
1180
1181// Comparisons.
1182#define VISITCOMP(CODE) \
1183 mlir::Value VisitBin##CODE(const BinaryOperator *E) { return emitCmp(E); }
1184 VISITCOMP(LT)
1185 VISITCOMP(GT)
1186 VISITCOMP(LE)
1187 VISITCOMP(GE)
1188 VISITCOMP(EQ)
1189 VISITCOMP(NE)
1190#undef VISITCOMP
1191
1192 mlir::Value VisitBinAssign(const BinaryOperator *e) {
1193 const bool ignore = std::exchange(ignoreResultAssign, false);
1194
1195 mlir::Value rhs;
1196 LValue lhs;
1197
1198 switch (e->getLHS()->getType().getObjCLifetime()) {
1199 case Qualifiers::OCL_Strong:
1200 case Qualifiers::OCL_Autoreleasing:
1201 case Qualifiers::OCL_ExplicitNone:
1202 case Qualifiers::OCL_Weak:
1204 break;
1205 case Qualifiers::OCL_None:
1206 // __block variables need to have the rhs evaluated first, plus this
1207 // should improve codegen just a little.
1208 rhs = Visit(e->getRHS());
1210 // TODO(cir): This needs to be emitCheckedLValue() once we support
1211 // sanitizers
1212 lhs = cgf.emitLValue(e->getLHS());
1213
1214 // Store the value into the LHS. Bit-fields are handled specially because
1215 // the result is altered by the store, i.e., [C99 6.5.16p1]
1216 // 'An assignment expression has the value of the left operand after the
1217 // assignment...'.
1218 if (lhs.isBitField()) {
1219 rhs = cgf.emitStoreThroughBitfieldLValue(RValue::get(rhs), lhs);
1220 } else {
1221 cgf.emitNullabilityCheck(lhs, rhs, e->getExprLoc());
1222 CIRGenFunction::SourceLocRAIIObject sourceloc{
1223 cgf, cgf.getLoc(e->getSourceRange())};
1224 cgf.emitStoreThroughLValue(RValue::get(rhs), lhs);
1225 }
1226 }
1227
1228 // If the result is clearly ignored, return now.
1229 if (ignore)
1230 return nullptr;
1231
1232 // The result of an assignment in C is the assigned r-value.
1233 if (!cgf.getLangOpts().CPlusPlus)
1234 return rhs;
1235
1236 // If the lvalue is non-volatile, return the computed value of the
1237 // assignment.
1238 if (!lhs.isVolatile())
1239 return rhs;
1240
1241 // Otherwise, reload the value.
1242 return emitLoadOfLValue(lhs, e->getExprLoc());
1243 }
1244
1245 mlir::Value VisitBinComma(const BinaryOperator *e) {
1246 cgf.emitIgnoredExpr(e->getLHS());
1247 // NOTE: We don't need to EnsureInsertPoint() like LLVM codegen.
1248 return Visit(e->getRHS());
1249 }
1250
1251 mlir::Value VisitBinLAnd(const clang::BinaryOperator *e) {
1252 if (e->getType()->isVectorType()) {
1253 mlir::Location loc = cgf.getLoc(e->getExprLoc());
1254 auto vecTy = mlir::cast<cir::VectorType>(cgf.convertType(e->getType()));
1255 mlir::Value zeroValue = builder.getNullValue(vecTy.getElementType(), loc);
1256 SmallVector<mlir::Value, 16> elements(vecTy.getSize(), zeroValue);
1257 auto zeroVec = cir::VecCreateOp::create(builder, loc, vecTy, elements);
1258
1259 mlir::Value lhs = Visit(e->getLHS());
1260 mlir::Value rhs = Visit(e->getRHS());
1261
1262 auto cmpOpKind = cir::CmpOpKind::ne;
1263 lhs = cir::VecCmpOp::create(builder, loc, vecTy, cmpOpKind, lhs, zeroVec);
1264 rhs = cir::VecCmpOp::create(builder, loc, vecTy, cmpOpKind, rhs, zeroVec);
1265 mlir::Value vecOr = builder.createAnd(loc, lhs, rhs);
1266 return builder.createIntCast(vecOr, vecTy);
1267 }
1268
1270 mlir::Type resTy = cgf.convertType(e->getType());
1271 mlir::Location loc = cgf.getLoc(e->getExprLoc());
1272
1273 CIRGenFunction::ConditionalEvaluation eval(cgf);
1274
1275 mlir::Value lhsCondV = cgf.evaluateExprAsBool(e->getLHS());
1276 auto resOp = cir::TernaryOp::create(
1277 builder, loc, lhsCondV, /*trueBuilder=*/
1278 [&](mlir::OpBuilder &b, mlir::Location loc) {
1279 CIRGenFunction::LexicalScope lexScope{cgf, loc,
1280 b.getInsertionBlock()};
1281 cgf.curLexScope->setAsTernary();
1282 mlir::Value res = cgf.evaluateExprAsBool(e->getRHS());
1283 lexScope.forceCleanup();
1284 cir::YieldOp::create(b, loc, res);
1285 },
1286 /*falseBuilder*/
1287 [&](mlir::OpBuilder &b, mlir::Location loc) {
1288 CIRGenFunction::LexicalScope lexScope{cgf, loc,
1289 b.getInsertionBlock()};
1290 cgf.curLexScope->setAsTernary();
1291 auto res = cir::ConstantOp::create(b, loc, builder.getFalseAttr());
1292 cir::YieldOp::create(b, loc, res.getRes());
1293 });
1294 return maybePromoteBoolResult(resOp.getResult(), resTy);
1295 }
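  // For scalars the lowering above short-circuits: the LHS is evaluated as a
  // bool and becomes the condition of a ternary; only the "true" region
  // evaluates the RHS, while the "false" region yields a constant false. E.g.
  // in `p && p->ready` the member load only happens when p is non-null. The
  // vector form earlier is element-wise and evaluates both operands
  // unconditionally.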
1296
1297 mlir::Value VisitBinLOr(const clang::BinaryOperator *e) {
1298 if (e->getType()->isVectorType()) {
1299 mlir::Location loc = cgf.getLoc(e->getExprLoc());
1300 auto vecTy = mlir::cast<cir::VectorType>(cgf.convertType(e->getType()));
1301 mlir::Value zeroValue = builder.getNullValue(vecTy.getElementType(), loc);
1302 SmallVector<mlir::Value, 16> elements(vecTy.getSize(), zeroValue);
1303 auto zeroVec = cir::VecCreateOp::create(builder, loc, vecTy, elements);
1304
1305 mlir::Value lhs = Visit(e->getLHS());
1306 mlir::Value rhs = Visit(e->getRHS());
1307
1308 auto cmpOpKind = cir::CmpOpKind::ne;
1309 lhs = cir::VecCmpOp::create(builder, loc, vecTy, cmpOpKind, lhs, zeroVec);
1310 rhs = cir::VecCmpOp::create(builder, loc, vecTy, cmpOpKind, rhs, zeroVec);
1311 mlir::Value vecOr = builder.createOr(loc, lhs, rhs);
1312 return builder.createIntCast(vecOr, vecTy);
1313 }
1314
1316 mlir::Type resTy = cgf.convertType(e->getType());
1317 mlir::Location loc = cgf.getLoc(e->getExprLoc());
1318
1319 CIRGenFunction::ConditionalEvaluation eval(cgf);
1320
1321 mlir::Value lhsCondV = cgf.evaluateExprAsBool(e->getLHS());
1322 auto resOp = cir::TernaryOp::create(
1323 builder, loc, lhsCondV, /*trueBuilder=*/
1324 [&](mlir::OpBuilder &b, mlir::Location loc) {
1325 CIRGenFunction::LexicalScope lexScope{cgf, loc,
1326 b.getInsertionBlock()};
1327 cgf.curLexScope->setAsTernary();
1328 auto res = cir::ConstantOp::create(b, loc, builder.getTrueAttr());
1329 cir::YieldOp::create(b, loc, res.getRes());
1330 },
1331 /*falseBuilder*/
1332 [&](mlir::OpBuilder &b, mlir::Location loc) {
1333 CIRGenFunction::LexicalScope lexScope{cgf, loc,
1334 b.getInsertionBlock()};
1335 cgf.curLexScope->setAsTernary();
1336 mlir::Value res = cgf.evaluateExprAsBool(e->getRHS());
1337 lexScope.forceCleanup();
1338 cir::YieldOp::create(b, loc, res);
1339 });
1340
1341 return maybePromoteBoolResult(resOp.getResult(), resTy);
1342 }
1343
1344 mlir::Value VisitBinPtrMemD(const BinaryOperator *e) {
1345 return emitLoadOfLValue(e);
1346 }
1347
1348 mlir::Value VisitBinPtrMemI(const BinaryOperator *e) {
1349 return emitLoadOfLValue(e);
1350 }
1351
1352 // Other Operators.
1353 mlir::Value VisitBlockExpr(const BlockExpr *e) {
1354 cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: block");
1355 return {};
1356 }
1357
1358 mlir::Value VisitChooseExpr(ChooseExpr *e) {
1359 return Visit(e->getChosenSubExpr());
1360 }
1361
1362 mlir::Value VisitObjCStringLiteral(const ObjCStringLiteral *e) {
1363 cgf.cgm.errorNYI(e->getSourceRange(),
1364 "ScalarExprEmitter: objc string literal");
1365 return {};
1366 }
1367 mlir::Value VisitObjCBoxedExpr(ObjCBoxedExpr *e) {
1368 cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: objc boxed");
1369 return {};
1370 }
1371 mlir::Value VisitObjCArrayLiteral(ObjCArrayLiteral *e) {
1372 cgf.cgm.errorNYI(e->getSourceRange(),
1373 "ScalarExprEmitter: objc array literal");
1374 return {};
1375 }
1376 mlir::Value VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *e) {
1377 cgf.cgm.errorNYI(e->getSourceRange(),
1378 "ScalarExprEmitter: objc dictionary literal");
1379 return {};
1380 }
1381
1382 mlir::Value VisitAsTypeExpr(AsTypeExpr *e) {
1383 cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: as type");
1384 return {};
1385 }
1386
1387 mlir::Value VisitAtomicExpr(AtomicExpr *e) {
1388 return cgf.emitAtomicExpr(e).getValue();
1389 }
1390};
1391
1392LValue ScalarExprEmitter::emitCompoundAssignLValue(
1393 const CompoundAssignOperator *e,
1394 mlir::Value (ScalarExprEmitter::*func)(const BinOpInfo &),
1395 mlir::Value &result) {
1396 if (e->getComputationResultType()->isAnyComplexType())
1397 return cgf.emitScalarCompoundAssignWithComplex(e, result);
1398
1399 QualType lhsTy = e->getLHS()->getType();
1400 BinOpInfo opInfo;
1401
1402 // Emit the RHS first. __block variables need to have the rhs evaluated
1403 // first, plus this should improve codegen a little.
1404
1405 QualType promotionTypeCR = getPromotionType(e->getComputationResultType());
1406 if (promotionTypeCR.isNull())
1407 promotionTypeCR = e->getComputationResultType();
1408
1409 QualType promotionTypeLHS = getPromotionType(e->getComputationLHSType());
1410 QualType promotionTypeRHS = getPromotionType(e->getRHS()->getType());
1411
1412 if (!promotionTypeRHS.isNull())
1413 opInfo.rhs = cgf.emitPromotedScalarExpr(e->getRHS(), promotionTypeRHS);
1414 else
1415 opInfo.rhs = Visit(e->getRHS());
1416
1417 opInfo.fullType = promotionTypeCR;
1418 opInfo.compType = opInfo.fullType;
1419 if (const auto *vecType = dyn_cast_or_null<VectorType>(opInfo.fullType))
1420 opInfo.compType = vecType->getElementType();
1421 opInfo.opcode = e->getOpcode();
1422 opInfo.fpfeatures = e->getFPFeaturesInEffect(cgf.getLangOpts());
1423 opInfo.e = e;
1424 opInfo.loc = e->getSourceRange();
1425
1426 // Load/convert the LHS
1427 LValue lhsLV = cgf.emitLValue(e->getLHS());
1428
1429 if (lhsTy->getAs<AtomicType>()) {
1430 cgf.cgm.errorNYI(result.getLoc(), "atomic lvalue assign");
1431 return LValue();
1432 }
1433
1434 opInfo.lhs = emitLoadOfLValue(lhsLV, e->getExprLoc());
1435
1436 CIRGenFunction::SourceLocRAIIObject sourceloc{
1437 cgf, cgf.getLoc(e->getSourceRange())};
1438 SourceLocation loc = e->getExprLoc();
1439 if (!promotionTypeLHS.isNull())
1440 opInfo.lhs = emitScalarConversion(opInfo.lhs, lhsTy, promotionTypeLHS, loc);
1441 else
1442 opInfo.lhs = emitScalarConversion(opInfo.lhs, lhsTy,
1443 e->getComputationLHSType(), loc);
1444
1445 // Expand the binary operator.
1446 result = (this->*func)(opInfo);
1447
1448 // Convert the result back to the LHS type,
1449 // potentially with Implicit Conversion sanitizer check.
1450 result = emitScalarConversion(result, promotionTypeCR, lhsTy, loc,
1451 ScalarConversionOpts(cgf.sanOpts));
1452
1453 // Store the result value into the LHS lvalue. Bit-fields are handled
1454 // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
1455 // 'An assignment expression has the value of the left operand after the
1456 // assignment...'.
1457 if (lhsLV.isBitField())
1458 cgf.emitStoreThroughBitfieldLValue(RValue::get(result), lhsLV);
1459 else
1460 cgf.emitStoreThroughLValue(RValue::get(result), lhsLV);
1461
1462 if (cgf.getLangOpts().OpenMP)
1463 cgf.cgm.errorNYI(e->getSourceRange(), "openmp");
1464
1465 return lhsLV;
1466}
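// A worked example of the sequence above, for `short s; ... s += 1;`: the RHS
// is emitted first, the LHS is loaded and converted to the computation type
// (int after the usual arithmetic conversions), the addition is performed by
// the emitAdd callback, the result is converted back to short (optionally
// with sanitizer checks), and it is stored through the original lvalue, which
// is returned so callers can re-load it when needed.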
1467
1468mlir::Value ScalarExprEmitter::emitComplexToScalarConversion(mlir::Location loc,
1469 mlir::Value value,
1470 CastKind kind,
1471 QualType destTy) {
1472 cir::CastKind castOpKind;
1473 switch (kind) {
1474 case CK_FloatingComplexToReal:
1475 castOpKind = cir::CastKind::float_complex_to_real;
1476 break;
1477 case CK_IntegralComplexToReal:
1478 castOpKind = cir::CastKind::int_complex_to_real;
1479 break;
1480 case CK_FloatingComplexToBoolean:
1481 castOpKind = cir::CastKind::float_complex_to_bool;
1482 break;
1483 case CK_IntegralComplexToBoolean:
1484 castOpKind = cir::CastKind::int_complex_to_bool;
1485 break;
1486 default:
1487 llvm_unreachable("invalid complex-to-scalar cast kind");
1488 }
1489
1490 return builder.createCast(loc, castOpKind, value, cgf.convertType(destTy));
1491}
1492
1493mlir::Value ScalarExprEmitter::emitPromoted(const Expr *e,
1494 QualType promotionType) {
1495 e = e->IgnoreParens();
1496 if (const auto *bo = dyn_cast<BinaryOperator>(e)) {
1497 switch (bo->getOpcode()) {
1498#define HANDLE_BINOP(OP) \
1499 case BO_##OP: \
1500 return emit##OP(emitBinOps(bo, promotionType));
1501 HANDLE_BINOP(Add)
1502 HANDLE_BINOP(Sub)
1503 HANDLE_BINOP(Mul)
1504 HANDLE_BINOP(Div)
1505#undef HANDLE_BINOP
1506 default:
1507 break;
1508 }
1509 } else if (const auto *uo = dyn_cast<UnaryOperator>(e)) {
1510 switch (uo->getOpcode()) {
1511 case UO_Imag:
1512 case UO_Real:
1513 return VisitRealImag(uo, promotionType);
1514 case UO_Minus:
1515 return emitUnaryPlusOrMinus(uo, cir::UnaryOpKind::Minus, promotionType);
1516 case UO_Plus:
1517 return emitUnaryPlusOrMinus(uo, cir::UnaryOpKind::Plus, promotionType);
1518 default:
1519 break;
1520 }
1521 }
1522 mlir::Value result = Visit(const_cast<Expr *>(e));
1523 if (result) {
1524 if (!promotionType.isNull())
1525 return emitPromotedValue(result, promotionType);
1526 return emitUnPromotedValue(result, e->getType());
1527 }
1528 return result;
1529}
1530
1531mlir::Value ScalarExprEmitter::emitCompoundAssign(
1532 const CompoundAssignOperator *e,
1533 mlir::Value (ScalarExprEmitter::*func)(const BinOpInfo &)) {
1534
1535 bool ignore = std::exchange(ignoreResultAssign, false);
1536 mlir::Value rhs;
1537 LValue lhs = emitCompoundAssignLValue(e, func, rhs);
1538
1539 // If the result is clearly ignored, return now.
1540 if (ignore)
1541 return {};
1542
1543 // The result of an assignment in C is the assigned r-value.
1544 if (!cgf.getLangOpts().CPlusPlus)
1545 return rhs;
1546
1547 // If the lvalue is non-volatile, return the computed value of the assignment.
1548 if (!lhs.isVolatile())
1549 return rhs;
1550
1551 // Otherwise, reload the value.
1552 return emitLoadOfLValue(lhs, e->getExprLoc());
1553}
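// Illustrative sketch (editorial addition, not part of this file): the
// result-value rules handled by emitCompoundAssign above, shown on plain source.
//
//   volatile int v = 1;
//   int a = (v += 2); // C: the result is the assigned r-value (3). C++ with a
//                     // volatile LHS: the value is re-loaded from 'v' after
//                     // the store instead of reusing the computed r-value.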
1554
1555mlir::Value ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *e) {
1556 mlir::Location scopeLoc = cgf.getLoc(e->getSourceRange());
1557 mlir::OpBuilder &builder = cgf.builder;
1558
1559 auto scope = cir::ScopeOp::create(
1560 builder, scopeLoc,
1561 /*scopeBuilder=*/
1562 [&](mlir::OpBuilder &b, mlir::Type &yieldTy, mlir::Location loc) {
1563 CIRGenFunction::LexicalScope lexScope{cgf, loc,
1564 builder.getInsertionBlock()};
1565 mlir::Value scopeYieldVal = Visit(e->getSubExpr());
1566 if (scopeYieldVal) {
1567 // Defend against dominance problems caused by jumps out of expression
1568 // evaluation through the shared cleanup block.
1569 lexScope.forceCleanup();
1570 cir::YieldOp::create(builder, loc, scopeYieldVal);
1571 yieldTy = scopeYieldVal.getType();
1572 }
1573 });
1574
1575 return scope.getNumResults() > 0 ? scope->getResult(0) : nullptr;
1576}
1577
1578} // namespace
1579
1580LValue
1581CIRGenFunction::emitCompoundAssignmentLValue(const CompoundAssignOperator *e) {
1582 ScalarExprEmitter emitter(*this, builder);
1583 mlir::Value result;
1584 switch (e->getOpcode()) {
1585#define COMPOUND_OP(Op) \
1586 case BO_##Op##Assign: \
1587 return emitter.emitCompoundAssignLValue(e, &ScalarExprEmitter::emit##Op, \
1588 result)
1589 COMPOUND_OP(Mul);
1590 COMPOUND_OP(Div);
1591 COMPOUND_OP(Rem);
1592 COMPOUND_OP(Add);
1593 COMPOUND_OP(Sub);
1594 COMPOUND_OP(Shl);
1595 COMPOUND_OP(Shr);
1596 COMPOUND_OP(And);
1597 COMPOUND_OP(Xor);
1598 COMPOUND_OP(Or);
1599#undef COMPOUND_OP
1600
1601 case BO_PtrMemD:
1602 case BO_PtrMemI:
1603 case BO_Mul:
1604 case BO_Div:
1605 case BO_Rem:
1606 case BO_Add:
1607 case BO_Sub:
1608 case BO_Shl:
1609 case BO_Shr:
1610 case BO_LT:
1611 case BO_GT:
1612 case BO_LE:
1613 case BO_GE:
1614 case BO_EQ:
1615 case BO_NE:
1616 case BO_Cmp:
1617 case BO_And:
1618 case BO_Xor:
1619 case BO_Or:
1620 case BO_LAnd:
1621 case BO_LOr:
1622 case BO_Assign:
1623 case BO_Comma:
1624 llvm_unreachable("Not valid compound assignment operators");
1625 }
1626 llvm_unreachable("Unhandled compound assignment operator");
1627}
1628
1629/// Emit the computation of the specified expression of scalar type.
1630mlir::Value CIRGenFunction::emitScalarExpr(const Expr *e,
1631 bool ignoreResultAssign) {
1632 assert(e && hasScalarEvaluationKind(e->getType()) &&
1633 "Invalid scalar expression to emit");
1634
1635 return ScalarExprEmitter(*this, builder, ignoreResultAssign)
1636 .Visit(const_cast<Expr *>(e));
1637}
1638
1639mlir::Value CIRGenFunction::emitPromotedScalarExpr(const Expr *e,
1640 QualType promotionType) {
1641 if (!promotionType.isNull())
1642 return ScalarExprEmitter(*this, builder).emitPromoted(e, promotionType);
1643 return ScalarExprEmitter(*this, builder).Visit(const_cast<Expr *>(e));
1644}
1645
1646[[maybe_unused]] static bool mustVisitNullValue(const Expr *e) {
1647 // If a null pointer expression's type is the C++0x nullptr_t and
1648 // the expression is not a simple literal, it must be evaluated
1649 // for its potential side effects.
1650 if (isa<CXXNullPtrLiteralExpr>(e))
1651 return false;
1652 return e->getType()->isNullPtrType();
1653}
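// Illustrative sketch (editorial addition, not part of this file): a
// nullptr_t-typed expression that must still be visited for its side effects,
// even though the converted pointer value is known to be null
// ('make_null' is a hypothetical helper).
//
//   std::nullptr_t make_null() { std::puts("side effect"); return nullptr; }
//   int *p = make_null(); // the value is null, but the call is still emitted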
1654
1655/// If \p e is a widened promoted integer, get its base (unpromoted) type.
1656static std::optional<QualType>
1657getUnwidenedIntegerType(const ASTContext &astContext, const Expr *e) {
1658 const Expr *base = e->IgnoreImpCasts();
1659 if (e == base)
1660 return std::nullopt;
1661
1662 QualType baseTy = base->getType();
1663 if (!astContext.isPromotableIntegerType(baseTy) ||
1664 astContext.getTypeSize(baseTy) >= astContext.getTypeSize(e->getType()))
1665 return std::nullopt;
1666
1667 return baseTy;
1668}
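// Illustrative sketch (editorial addition, not part of this file): in the
// expression below, the usual arithmetic conversions widen 's' to 'int';
// getUnwidenedIntegerType applied to the promoted operand reports the base
// (unpromoted) type 'short'.
//
//   short s = 100;
//   int r = s + 1; // 's' is a widened promoted integer with base type 'short'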
1669
1670/// Check if \p e is a widened promoted integer.
1671[[maybe_unused]] static bool isWidenedIntegerOp(const ASTContext &astContext,
1672 const Expr *e) {
1673 return getUnwidenedIntegerType(astContext, e).has_value();
1674}
1675
1676/// Check if we can skip the overflow check for \p Op.
1677[[maybe_unused]] static bool canElideOverflowCheck(const ASTContext &astContext,
1678 const BinOpInfo &op) {
1679 assert((isa<UnaryOperator>(op.e) || isa<BinaryOperator>(op.e)) &&
1680 "Expected a unary or binary operator");
1681
1682 // If the binop has constant inputs and we can prove there is no overflow,
1683 // we can elide the overflow check.
1684 if (!op.mayHaveIntegerOverflow())
1685 return true;
1686
1687 // If a unary op has a widened operand, the op cannot overflow.
1688 if (const auto *uo = dyn_cast<UnaryOperator>(op.e))
1689 return !uo->canOverflow();
1690
1691 // We usually don't need overflow checks for binops with widened operands.
1692 // Multiplication with promoted unsigned operands is a special case.
1693 const auto *bo = cast<BinaryOperator>(op.e);
1694 std::optional<QualType> optionalLHSTy =
1695 getUnwidenedIntegerType(astContext, bo->getLHS());
1696 if (!optionalLHSTy)
1697 return false;
1698
1699 std::optional<QualType> optionalRHSTy =
1700 getUnwidenedIntegerType(astContext, bo->getRHS());
1701 if (!optionalRHSTy)
1702 return false;
1703
1704 QualType lhsTy = *optionalLHSTy;
1705 QualType rhsTy = *optionalRHSTy;
1706
1707 // This is the simple case: binops without unsigned multiplication, and with
1708 // widened operands. No overflow check is needed here.
1709 if ((op.opcode != BO_Mul && op.opcode != BO_MulAssign) ||
1710 !lhsTy->isUnsignedIntegerType() || !rhsTy->isUnsignedIntegerType())
1711 return true;
1712
1713 // For unsigned multiplication the overflow check can be elided if either one
1714 // of the unpromoted types is less than half the size of the promoted type.
1715 unsigned promotedSize = astContext.getTypeSize(op.e->getType());
1716 return (2 * astContext.getTypeSize(lhsTy)) < promotedSize ||
1717 (2 * astContext.getTypeSize(rhsTy)) < promotedSize;
1718}
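// Illustrative sketch (editorial addition, not part of this file): assuming a
// 32-bit 'int', the product of two promoted 'unsigned char' operands cannot
// overflow because 2 * 8 < 32, so the check is elided; for promoted
// 'unsigned short' operands 2 * 16 is not < 32, so the check is kept.
//
//   unsigned char a = 200, b = 250;      int p = a * b; // check elided
//   unsigned short c = 60000, d = 60000; int q = c * d; // check kept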
1719
1720/// Emit pointer + index arithmetic.
1721static mlir::Value emitPointerArithmetic(CIRGenFunction &cgf,
1722 const BinOpInfo &op,
1723 bool isSubtraction) {
1724 // Must have binary (not unary) expr here. Unary pointer
1725 // increment/decrement doesn't use this path.
1726 const BinaryOperator *expr = cast<BinaryOperator>(op.e);
1727
1728 mlir::Value pointer = op.lhs;
1729 Expr *pointerOperand = expr->getLHS();
1730 mlir::Value index = op.rhs;
1731 Expr *indexOperand = expr->getRHS();
1732
1733 // In the case of subtraction, the FE has ensured that the LHS is always the
1734 // pointer. However, addition can have the pointer on either side. We will
1735 // always have a pointer operand and an integer operand, so if the LHS wasn't
1736 // a pointer, we need to swap our values.
1737 if (!isSubtraction && !mlir::isa<cir::PointerType>(pointer.getType())) {
1738 std::swap(pointer, index);
1739 std::swap(pointerOperand, indexOperand);
1740 }
1741 assert(mlir::isa<cir::PointerType>(pointer.getType()) &&
1742 "Need a pointer operand");
1743 assert(mlir::isa<cir::IntType>(index.getType()) && "Need an integer operand");
1744
1745 // Some versions of glibc and gcc use idioms (particularly in their malloc
1746 // routines) that add a pointer-sized integer (known to be a pointer value)
1747 // to a null pointer in order to cast the value back to an integer or as
1748 // part of a pointer alignment algorithm. This is undefined behavior, but
1749 // we'd like to be able to compile programs that use it.
1750 //
1751 // Normally, we'd generate a GEP with a null-pointer base here in response
1752 // to that code, but it's also UB to dereference a pointer created that
1753 // way. Instead (as an acknowledged hack to tolerate the idiom) we will
1754 // generate a direct cast of the integer value to a pointer.
1755 //
1756 // The idiom (p = nullptr + N) is not met if any of the following are true:
1757 //
1758 // The operation is subtraction.
1759 // The index is not pointer-sized.
1760 // The pointer type is not byte-sized.
1761 //
1762 if (BinaryOperator::isNullPointerArithmeticExtension(
1763 cgf.getContext(), op.opcode, expr->getLHS(), expr->getRHS()))
1764 return cgf.getBuilder().createIntToPtr(index, pointer.getType());
1765
1766 // Unlike in LLVM codegen, ABI bits for index sizes are handled during
1767 // LLVM lowering.
1768
1769 // If this is subtraction, negate the index.
1770 if (isSubtraction)
1771 index = cgf.getBuilder().createNeg(index);
1772
1774
1775 const PointerType *pointerType =
1776 pointerOperand->getType()->getAs<PointerType>();
1777 if (!pointerType) {
1778 cgf.cgm.errorNYI("Objective-C:pointer arithmetic with non-pointer type");
1779 return nullptr;
1780 }
1781
1782 QualType elementType = pointerType->getPointeeType();
1783 if (cgf.getContext().getAsVariableArrayType(elementType)) {
1784 cgf.cgm.errorNYI("variable array type");
1785 return nullptr;
1786 }
1787
1788 if (elementType->isVoidType() || elementType->isFunctionType()) {
1789 cgf.cgm.errorNYI("void* or function pointer arithmetic");
1790 return nullptr;
1791 }
1792
1794 return cir::PtrStrideOp::create(cgf.getBuilder(),
1795 cgf.getLoc(op.e->getExprLoc()),
1796 pointer.getType(), pointer, index);
1797}
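// Illustrative sketch (editorial addition, not part of this file): the
// glibc/gcc alignment idiom recognized above; rather than striding from a null
// base pointer, the integer is cast directly to a pointer
// ('aligned_bits' is a hypothetical value, uintptr_t is from <stdint.h>).
//
//   uintptr_t aligned_bits = 0x1000;
//   char *p = (char *)0 + aligned_bits; // emitted as an int-to-pointer cast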
1798
1799mlir::Value ScalarExprEmitter::emitMul(const BinOpInfo &ops) {
1800 const mlir::Location loc = cgf.getLoc(ops.loc);
1801 if (ops.compType->isSignedIntegerOrEnumerationType()) {
1802 switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
1803 case LangOptions::SOB_Defined:
1804 if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1805 return builder.createMul(loc, ops.lhs, ops.rhs);
1806 [[fallthrough]];
1807 case LangOptions::SOB_Undefined:
1808 if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1809 return builder.createNSWMul(loc, ops.lhs, ops.rhs);
1810 [[fallthrough]];
1811 case LangOptions::SOB_Trapping:
1812 if (canElideOverflowCheck(cgf.getContext(), ops))
1813 return builder.createNSWMul(loc, ops.lhs, ops.rhs);
1814 cgf.cgm.errorNYI("sanitizers");
1815 }
1816 }
1817 if (ops.fullType->isConstantMatrixType()) {
1819 cgf.cgm.errorNYI("matrix types");
1820 return nullptr;
1821 }
1822 if (ops.compType->isUnsignedIntegerType() &&
1823 cgf.sanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
1824 !canElideOverflowCheck(cgf.getContext(), ops))
1825 cgf.cgm.errorNYI("unsigned int overflow sanitizer");
1826
1827 if (cir::isFPOrVectorOfFPType(ops.lhs.getType())) {
1829 return builder.createFMul(loc, ops.lhs, ops.rhs);
1830 }
1831
1832 if (ops.isFixedPointOp()) {
1834 cgf.cgm.errorNYI("fixed point");
1835 return nullptr;
1836 }
1837
1838 return cir::BinOp::create(builder, cgf.getLoc(ops.loc),
1839 cgf.convertType(ops.fullType), cir::BinOpKind::Mul,
1840 ops.lhs, ops.rhs);
1841}
1842mlir::Value ScalarExprEmitter::emitDiv(const BinOpInfo &ops) {
1843 return cir::BinOp::create(builder, cgf.getLoc(ops.loc),
1844 cgf.convertType(ops.fullType), cir::BinOpKind::Div,
1845 ops.lhs, ops.rhs);
1846}
1847mlir::Value ScalarExprEmitter::emitRem(const BinOpInfo &ops) {
1848 return cir::BinOp::create(builder, cgf.getLoc(ops.loc),
1849 cgf.convertType(ops.fullType), cir::BinOpKind::Rem,
1850 ops.lhs, ops.rhs);
1851}
1852
1853mlir::Value ScalarExprEmitter::emitAdd(const BinOpInfo &ops) {
1854 if (mlir::isa<cir::PointerType>(ops.lhs.getType()) ||
1855 mlir::isa<cir::PointerType>(ops.rhs.getType()))
1856 return emitPointerArithmetic(cgf, ops, /*isSubtraction=*/false);
1857
1858 const mlir::Location loc = cgf.getLoc(ops.loc);
1859 if (ops.compType->isSignedIntegerOrEnumerationType()) {
1860 switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
1861 case LangOptions::SOB_Defined:
1862 if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1863 return builder.createAdd(loc, ops.lhs, ops.rhs);
1864 [[fallthrough]];
1865 case LangOptions::SOB_Undefined:
1866 if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1867 return builder.createNSWAdd(loc, ops.lhs, ops.rhs);
1868 [[fallthrough]];
1869 case LangOptions::SOB_Trapping:
1870 if (canElideOverflowCheck(cgf.getContext(), ops))
1871 return builder.createNSWAdd(loc, ops.lhs, ops.rhs);
1872 cgf.cgm.errorNYI("sanitizers");
1873 }
1874 }
1875 if (ops.fullType->isConstantMatrixType()) {
1877 cgf.cgm.errorNYI("matrix types");
1878 return nullptr;
1879 }
1880
1881 if (ops.compType->isUnsignedIntegerType() &&
1882 cgf.sanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
1883 !canElideOverflowCheck(cgf.getContext(), ops))
1884 cgf.cgm.errorNYI("unsigned int overflow sanitizer");
1885
1886 if (cir::isFPOrVectorOfFPType(ops.lhs.getType())) {
1888 return builder.createFAdd(loc, ops.lhs, ops.rhs);
1889 }
1890
1891 if (ops.isFixedPointOp()) {
1893 cgf.cgm.errorNYI("fixed point");
1894 return {};
1895 }
1896
1897 return cir::BinOp::create(builder, loc, cgf.convertType(ops.fullType),
1898 cir::BinOpKind::Add, ops.lhs, ops.rhs);
1899}
1900
1901mlir::Value ScalarExprEmitter::emitSub(const BinOpInfo &ops) {
1902 const mlir::Location loc = cgf.getLoc(ops.loc);
1903 // The LHS is always a pointer if either side is.
1904 if (!mlir::isa<cir::PointerType>(ops.lhs.getType())) {
1905 if (ops.compType->isSignedIntegerOrEnumerationType()) {
1906 switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
1907 case LangOptions::SOB_Defined: {
1908 if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1909 return builder.createSub(loc, ops.lhs, ops.rhs);
1910 [[fallthrough]];
1911 }
1912 case LangOptions::SOB_Undefined:
1913 if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1914 return builder.createNSWSub(loc, ops.lhs, ops.rhs);
1915 [[fallthrough]];
1916 case LangOptions::SOB_Trapping:
1917 if (canElideOverflowCheck(cgf.getContext(), ops))
1918 return builder.createNSWSub(loc, ops.lhs, ops.rhs);
1919 cgf.cgm.errorNYI("sanitizers");
1920 }
1921 }
1922
1923 if (ops.fullType->isConstantMatrixType()) {
1925 cgf.cgm.errorNYI("matrix types");
1926 return nullptr;
1927 }
1928
1929 if (ops.compType->isUnsignedIntegerType() &&
1930 cgf.sanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
1931 !canElideOverflowCheck(cgf.getContext(), ops))
1932 cgf.cgm.errorNYI("unsigned int overflow sanitizer");
1933
1934 if (cir::isFPOrVectorOfFPType(ops.lhs.getType())) {
1936 return builder.createFSub(loc, ops.lhs, ops.rhs);
1937 }
1938
1939 if (ops.isFixedPointOp()) {
1941 cgf.cgm.errorNYI("fixed point");
1942 return {};
1943 }
1944
1945 return cir::BinOp::create(builder, cgf.getLoc(ops.loc),
1946 cgf.convertType(ops.fullType),
1947 cir::BinOpKind::Sub, ops.lhs, ops.rhs);
1948 }
1949
1950 // If the RHS is not a pointer, then we have normal pointer
1951 // arithmetic.
1952 if (!mlir::isa<cir::PointerType>(ops.rhs.getType()))
1953 return emitPointerArithmetic(cgf, ops, /*isSubtraction=*/true);
1954
1955 // Otherwise, this is a pointer subtraction
1956
1957 // Do the raw subtraction part.
1958 //
1959 // TODO(cir): note for LLVM lowering out of this; when expanding this into
1960 // LLVM we shall take VLA's, division by element size, etc.
1961 //
1962 // See more in `EmitSub` in CGExprScalar.cpp.
1964 return cir::PtrDiffOp::create(builder, cgf.getLoc(ops.loc), cgf.ptrDiffTy,
1965 ops.lhs, ops.rhs);
1966}
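// Illustrative sketch (editorial addition, not part of this file): pointer
// subtraction handled at the end of emitSub; CIR emits a single PtrDiffOp and
// defers the division by the element size to LLVM lowering
// (ptrdiff_t is from <stddef.h>).
//
//   int buf[8];
//   ptrdiff_t n = &buf[6] - &buf[2]; // 4 elements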
1967
1968mlir::Value ScalarExprEmitter::emitShl(const BinOpInfo &ops) {
1969 // TODO: This misses out on the sanitizer check below.
1970 if (ops.isFixedPointOp()) {
1972 cgf.cgm.errorNYI("fixed point");
1973 return {};
1974 }
1975
1976 // CIR accepts shifts between different types, meaning nothing special needs
1977 // to be done here. OTOH, LLVM requires the LHS and RHS to be the same type:
1978 // promote or truncate the RHS to the same size as the LHS.
1979
1980 bool sanitizeSignedBase = cgf.sanOpts.has(SanitizerKind::ShiftBase) &&
1981 ops.compType->hasSignedIntegerRepresentation() &&
1982 !cgf.getLangOpts().isSignedOverflowDefined() &&
1983 !cgf.getLangOpts().CPlusPlus20;
1984 bool sanitizeUnsignedBase =
1985 cgf.sanOpts.has(SanitizerKind::UnsignedShiftBase) &&
1986 ops.compType->hasUnsignedIntegerRepresentation();
1987 bool sanitizeBase = sanitizeSignedBase || sanitizeUnsignedBase;
1988 bool sanitizeExponent = cgf.sanOpts.has(SanitizerKind::ShiftExponent);
1989
1990 // OpenCL 6.3j: shift values are effectively % word size of LHS.
1991 if (cgf.getLangOpts().OpenCL)
1992 cgf.cgm.errorNYI("opencl");
1993 else if ((sanitizeBase || sanitizeExponent) &&
1994 mlir::isa<cir::IntType>(ops.lhs.getType()))
1995 cgf.cgm.errorNYI("sanitizers");
1996
1997 return builder.createShiftLeft(cgf.getLoc(ops.loc), ops.lhs, ops.rhs);
1998}
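// Illustrative sketch (editorial addition, not part of this file): CIR keeps
// the original operand types of a shift, so the 'long long' LHS and 'int' RHS
// below need no conversion here; LLVM lowering later brings both operands to
// the same width.
//
//   long long x = 1;
//   int amount = 40;
//   long long y = x << amount;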
1999
2000mlir::Value ScalarExprEmitter::emitShr(const BinOpInfo &ops) {
2001 // TODO: This misses out on the sanitizer check below.
2002 if (ops.isFixedPointOp()) {
2004 cgf.cgm.errorNYI("fixed point");
2005 return {};
2006 }
2007
2008 // CIR accepts shifts between different types, meaning nothing special needs
2009 // to be done here. OTOH, LLVM requires the LHS and RHS to be the same type:
2010 // promote or truncate the RHS to the same size as the LHS.
2011
2012 // OpenCL 6.3j: shift values are effectively % word size of LHS.
2013 if (cgf.getLangOpts().OpenCL)
2014 cgf.cgm.errorNYI("opencl");
2015 else if (cgf.sanOpts.has(SanitizerKind::ShiftExponent) &&
2016 mlir::isa<cir::IntType>(ops.lhs.getType()))
2017 cgf.cgm.errorNYI("sanitizers");
2018
2019 // Note that we don't need to distinguish unsigned treatment at this
2020 // point since it will be handled later by LLVM lowering.
2021 return builder.createShiftRight(cgf.getLoc(ops.loc), ops.lhs, ops.rhs);
2022}
2023
2024mlir::Value ScalarExprEmitter::emitAnd(const BinOpInfo &ops) {
2025 return cir::BinOp::create(builder, cgf.getLoc(ops.loc),
2026 cgf.convertType(ops.fullType), cir::BinOpKind::And,
2027 ops.lhs, ops.rhs);
2028}
2029mlir::Value ScalarExprEmitter::emitXor(const BinOpInfo &ops) {
2030 return cir::BinOp::create(builder, cgf.getLoc(ops.loc),
2031 cgf.convertType(ops.fullType), cir::BinOpKind::Xor,
2032 ops.lhs, ops.rhs);
2033}
2034mlir::Value ScalarExprEmitter::emitOr(const BinOpInfo &ops) {
2035 return cir::BinOp::create(builder, cgf.getLoc(ops.loc),
2036 cgf.convertType(ops.fullType), cir::BinOpKind::Or,
2037 ops.lhs, ops.rhs);
2038}
2039
2040// Emit code for an explicit or implicit cast. Implicit
2041// casts have to handle a broader range of conversions than explicit
2042// casts, as they handle things like function to ptr-to-function decay
2043// etc.
2044mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *ce) {
2045 Expr *subExpr = ce->getSubExpr();
2046 QualType destTy = ce->getType();
2047 CastKind kind = ce->getCastKind();
2048
2049 // These cases are generally not written to ignore the result of evaluating
2050 // their sub-expressions, so we clear this now.
2051 ignoreResultAssign = false;
2052
2053 switch (kind) {
2054 case clang::CK_Dependent:
2055 llvm_unreachable("dependent cast kind in CIR gen!");
2056 case clang::CK_BuiltinFnToFnPtr:
2057 llvm_unreachable("builtin functions are handled elsewhere");
2058
2059 case CK_CPointerToObjCPointerCast:
2060 case CK_BlockPointerToObjCPointerCast:
2061 case CK_AnyPointerToBlockPointerCast:
2062 case CK_BitCast: {
2063 mlir::Value src = Visit(const_cast<Expr *>(subExpr));
2064 mlir::Type dstTy = cgf.convertType(destTy);
2065
2067
2068 if (cgf.sanOpts.has(SanitizerKind::CFIUnrelatedCast))
2069 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
2070 "sanitizer support");
2071
2072 if (cgf.cgm.getCodeGenOpts().StrictVTablePointers)
2073 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
2074 "strict vtable pointers");
2075
2076 // Update heapallocsite metadata when there is an explicit pointer cast.
2078
2079 // If Src is a fixed vector and Dst is a scalable vector, and both have the
2080 // same element type, use the llvm.vector.insert intrinsic to perform the
2081 // bitcast.
2083
2084 // If Src is a scalable vector and Dst is a fixed vector, and both have the
2085 // same element type, use the llvm.vector.extract intrinsic to perform the
2086 // bitcast.
2088
2089 // Perform VLAT <-> VLST bitcast through memory.
2090 // TODO: since the llvm.experimental.vector.{insert,extract} intrinsics
2091 // require the element types of the vectors to be the same, we
2092 // need to keep this around for bitcasts between VLAT <-> VLST where
2093 // the element types of the vectors are not the same, until we figure
2094 // out a better way of doing these casts.
2096
2097 return cgf.getBuilder().createBitcast(cgf.getLoc(subExpr->getSourceRange()),
2098 src, dstTy);
2099 }
2100 case CK_AddressSpaceConversion: {
2101 Expr::EvalResult result;
2102 if (subExpr->EvaluateAsRValue(result, cgf.getContext()) &&
2103 result.Val.isNullPointer()) {
2104 // If e has side effects, it is emitted even if its final result is a
2105 // null pointer. In that case, a DCE pass should be able to
2106 // eliminate the useless instructions emitted while translating e.
2107 if (result.HasSideEffects)
2108 Visit(subExpr);
2109 return cgf.cgm.emitNullConstant(destTy,
2110 cgf.getLoc(subExpr->getExprLoc()));
2111 }
2112
2113 clang::QualType srcTy = subExpr->IgnoreImpCasts()->getType();
2114 if (srcTy->isPointerType() || srcTy->isReferenceType())
2115 srcTy = srcTy->getPointeeType();
2116
2117 clang::LangAS srcLangAS = srcTy.getAddressSpace();
2118 cir::TargetAddressSpaceAttr subExprAS;
2119 if (clang::isTargetAddressSpace(srcLangAS))
2120 subExprAS = cir::toCIRTargetAddressSpace(cgf.getMLIRContext(), srcLangAS);
2121 else
2122 cgf.cgm.errorNYI(subExpr->getSourceRange(),
2123 "non-target address space conversion");
2124 // Since target may map different address spaces in AST to the same address
2125 // space, an address space conversion may end up as a bitcast.
2126 return cgf.cgm.getTargetCIRGenInfo().performAddrSpaceCast(
2127 cgf, Visit(subExpr), subExprAS, convertType(destTy));
2128 }
2129
2130 case CK_AtomicToNonAtomic: {
2131 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
2132 "CastExpr: ", ce->getCastKindName());
2133 mlir::Location loc = cgf.getLoc(subExpr->getSourceRange());
2134 return cgf.createDummyValue(loc, destTy);
2135 }
2136 case CK_NonAtomicToAtomic:
2137 case CK_UserDefinedConversion:
2138 return Visit(const_cast<Expr *>(subExpr));
2139 case CK_NoOp: {
2140 auto v = Visit(const_cast<Expr *>(subExpr));
2141 if (v) {
2142 // CK_NoOp can model a pointer qualification conversion, which can remove
2143 // an array bound and change the IR type.
2144 // FIXME: Once pointee types are removed from IR, remove this.
2145 mlir::Type t = cgf.convertType(destTy);
2146 if (t != v.getType())
2147 cgf.getCIRGenModule().errorNYI("pointer qualification conversion");
2148 }
2149 return v;
2150 }
2151 case CK_IntegralToPointer: {
2152 mlir::Type destCIRTy = cgf.convertType(destTy);
2153 mlir::Value src = Visit(const_cast<Expr *>(subExpr));
2154
2155 // Properly resize by casting to an int of the same size as the pointer.
2156 // Clang's IntegralToPointer includes 'bool' as the source, but in CIR
2157 // 'bool' is not an integral type. So check the source type to get the
2158 // correct CIR conversion.
2159 mlir::Type middleTy = cgf.cgm.getDataLayout().getIntPtrType(destCIRTy);
2160 mlir::Value middleVal = builder.createCast(
2161 subExpr->getType()->isBooleanType() ? cir::CastKind::bool_to_int
2162 : cir::CastKind::integral,
2163 src, middleTy);
2164
2165 if (cgf.cgm.getCodeGenOpts().StrictVTablePointers) {
2166 cgf.cgm.errorNYI(subExpr->getSourceRange(),
2167 "IntegralToPointer: strict vtable pointers");
2168 return {};
2169 }
2170
2171 return builder.createIntToPtr(middleVal, destCIRTy);
2172 }
2173
2174 case CK_BaseToDerived: {
2175 const CXXRecordDecl *derivedClassDecl = destTy->getPointeeCXXRecordDecl();
2176 assert(derivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");
2177 Address base = cgf.emitPointerWithAlignment(subExpr);
2178 Address derived = cgf.getAddressOfDerivedClass(
2179 cgf.getLoc(ce->getSourceRange()), base, derivedClassDecl, ce->path(),
2180 cgf.shouldNullCheckClassCastValue(ce));
2181
2182 // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
2183 // performed and the object is not of the derived type.
2185
2186 return cgf.getAsNaturalPointerTo(derived, ce->getType()->getPointeeType());
2187 }
2188 case CK_UncheckedDerivedToBase:
2189 case CK_DerivedToBase: {
2190 // The EmitPointerWithAlignment path does this fine; just discard
2191 // the alignment.
2192 return cgf.getAsNaturalPointerTo(cgf.emitPointerWithAlignment(subExpr),
2193 ce->getType()->getPointeeType());
2194 }
2195 case CK_Dynamic: {
2196 Address v = cgf.emitPointerWithAlignment(subExpr);
2197 const auto *dce = cast<CXXDynamicCastExpr>(ce);
2198 return cgf.emitDynamicCast(v, dce);
2199 }
2200 case CK_ArrayToPointerDecay:
2201 return cgf.emitArrayToPointerDecay(subExpr).getPointer();
2202
2203 case CK_NullToPointer: {
2204 if (mustVisitNullValue(subExpr))
2205 cgf.emitIgnoredExpr(subExpr);
2206
2207 // Note that DestTy is used as the MLIR type instead of a custom
2208 // nullptr type.
2209 mlir::Type ty = cgf.convertType(destTy);
2210 return builder.getNullPtr(ty, cgf.getLoc(subExpr->getExprLoc()));
2211 }
2212
2213 case CK_LValueToRValue:
2214 assert(cgf.getContext().hasSameUnqualifiedType(subExpr->getType(), destTy));
2215 assert(subExpr->isGLValue() && "lvalue-to-rvalue applied to r-value!");
2216 return Visit(const_cast<Expr *>(subExpr));
2217
2218 case CK_IntegralCast: {
2219 ScalarConversionOpts opts;
2220 if (auto *ice = dyn_cast<ImplicitCastExpr>(ce)) {
2221 if (!ice->isPartOfExplicitCast())
2222 opts = ScalarConversionOpts(cgf.sanOpts);
2223 }
2224 return emitScalarConversion(Visit(subExpr), subExpr->getType(), destTy,
2225 ce->getExprLoc(), opts);
2226 }
2227
2228 case CK_FloatingComplexToReal:
2229 case CK_IntegralComplexToReal:
2230 case CK_FloatingComplexToBoolean:
2231 case CK_IntegralComplexToBoolean: {
2232 mlir::Value value = cgf.emitComplexExpr(subExpr);
2233 return emitComplexToScalarConversion(cgf.getLoc(ce->getExprLoc()), value,
2234 kind, destTy);
2235 }
2236
2237 case CK_FloatingRealToComplex:
2238 case CK_FloatingComplexCast:
2239 case CK_IntegralRealToComplex:
2240 case CK_IntegralComplexCast:
2241 case CK_IntegralComplexToFloatingComplex:
2242 case CK_FloatingComplexToIntegralComplex:
2243 llvm_unreachable("scalar cast to non-scalar value");
2244
2245 case CK_PointerToIntegral: {
2246 assert(!destTy->isBooleanType() && "bool should use PointerToBool");
2247 if (cgf.cgm.getCodeGenOpts().StrictVTablePointers)
2248 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
2249 "strict vtable pointers");
2250 return builder.createPtrToInt(Visit(subExpr), cgf.convertType(destTy));
2251 }
2252 case CK_ToVoid:
2253 cgf.emitIgnoredExpr(subExpr);
2254 return {};
2255
2256 case CK_IntegralToFloating:
2257 case CK_FloatingToIntegral:
2258 case CK_FloatingCast:
2259 case CK_FixedPointToFloating:
2260 case CK_FloatingToFixedPoint: {
2261 if (kind == CK_FixedPointToFloating || kind == CK_FloatingToFixedPoint) {
2262 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
2263 "fixed point casts");
2264 return {};
2265 }
2267 return emitScalarConversion(Visit(subExpr), subExpr->getType(), destTy,
2268 ce->getExprLoc());
2269 }
2270
2271 case CK_IntegralToBoolean:
2272 return emitIntToBoolConversion(Visit(subExpr),
2273 cgf.getLoc(ce->getSourceRange()));
2274
2275 case CK_PointerToBoolean:
2276 return emitPointerToBoolConversion(Visit(subExpr), subExpr->getType());
2277 case CK_FloatingToBoolean:
2278 return emitFloatToBoolConversion(Visit(subExpr),
2279 cgf.getLoc(subExpr->getExprLoc()));
2280 case CK_MemberPointerToBoolean: {
2281 mlir::Value memPtr = Visit(subExpr);
2282 return builder.createCast(cgf.getLoc(ce->getSourceRange()),
2283 cir::CastKind::member_ptr_to_bool, memPtr,
2284 cgf.convertType(destTy));
2285 }
2286
2287 case CK_VectorSplat: {
2288 // Create a vector object and fill all elements with the same scalar value.
2289 assert(destTy->isVectorType() && "CK_VectorSplat to non-vector type");
2290 return cir::VecSplatOp::create(builder,
2291 cgf.getLoc(subExpr->getSourceRange()),
2292 cgf.convertType(destTy), Visit(subExpr));
2293 }
2294 case CK_FunctionToPointerDecay:
2295 return cgf.emitLValue(subExpr).getPointer();
2296
2297 default:
2298 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
2299 "CastExpr: ", ce->getCastKindName());
2300 }
2301 return {};
2302}
2303
2304mlir::Value ScalarExprEmitter::VisitCallExpr(const CallExpr *e) {
2305 if (e->getCallReturnType(cgf.getContext())->isReferenceType())
2306 return emitLoadOfLValue(e);
2307
2308 auto v = cgf.emitCallExpr(e).getValue();
2310 return v;
2311}
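// Illustrative sketch (editorial addition, not part of this file): a call whose
// return type is a reference is emitted as an l-value and then loaded, matching
// the reference check at the top of VisitCallExpr ('get' is hypothetical).
//
//   int &get();
//   int x = get(); // the call yields a reference; the scalar value is loaded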
2312
2313mlir::Value ScalarExprEmitter::VisitMemberExpr(MemberExpr *e) {
2314 // TODO(cir): The classic codegen calls tryEmitAsConstant() here. Folding
2315 // constants sounds like work for MLIR optimizers, but we'll keep an assertion
2316 // for now.
2318 Expr::EvalResult result;
2319 if (e->EvaluateAsInt(result, cgf.getContext(), Expr::SE_AllowSideEffects)) {
2320 llvm::APSInt value = result.Val.getInt();
2321 cgf.emitIgnoredExpr(e->getBase());
2322 return builder.getConstInt(cgf.getLoc(e->getExprLoc()), value);
2323 }
2324 return emitLoadOfLValue(e);
2325}
2326
2327mlir::Value ScalarExprEmitter::VisitInitListExpr(InitListExpr *e) {
2328 const unsigned numInitElements = e->getNumInits();
2329
2330 [[maybe_unused]] const bool ignore = std::exchange(ignoreResultAssign, false);
2331 assert((ignore == false ||
2332 (numInitElements == 0 && e->getType()->isVoidType())) &&
2333 "init list ignored");
2334
2335 if (e->hadArrayRangeDesignator()) {
2336 cgf.cgm.errorNYI(e->getSourceRange(), "ArrayRangeDesignator");
2337 return {};
2338 }
2339
2340 if (e->getType()->isVectorType()) {
2341 const auto vectorType =
2342 mlir::cast<cir::VectorType>(cgf.convertType(e->getType()));
2343
2344 SmallVector<mlir::Value, 16> elements;
2345 for (Expr *init : e->inits()) {
2346 elements.push_back(Visit(init));
2347 }
2348
2349 // Zero-initialize any remaining values.
2350 if (numInitElements < vectorType.getSize()) {
2351 const mlir::Value zeroValue = cgf.getBuilder().getNullValue(
2352 vectorType.getElementType(), cgf.getLoc(e->getSourceRange()));
2353 std::fill_n(std::back_inserter(elements),
2354 vectorType.getSize() - numInitElements, zeroValue);
2355 }
2356
2357 return cir::VecCreateOp::create(cgf.getBuilder(),
2358 cgf.getLoc(e->getSourceRange()), vectorType,
2359 elements);
2360 }
2361
2362 // C++11 value-initialization for the scalar.
2363 if (numInitElements == 0)
2364 return emitNullValue(e->getType(), cgf.getLoc(e->getExprLoc()));
2365
2366 return Visit(e->getInit(0));
2367}
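// Illustrative sketch (editorial addition, not part of this file): a vector
// initializer list with fewer initializers than lanes; the remaining lanes are
// zero-filled as in the loop above.
//
//   typedef int vec4 __attribute__((vector_size(16)));
//   vec4 v = {1, 2}; // lanes 2 and 3 are zero-initialized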
2368
2369mlir::Value CIRGenFunction::emitScalarConversion(mlir::Value src,
2370 QualType srcTy, QualType dstTy,
2371 SourceLocation loc) {
2374 "Invalid scalar expression to emit");
2375 return ScalarExprEmitter(*this, builder)
2376 .emitScalarConversion(src, srcTy, dstTy, loc);
2377}
2378
2379mlir::Value CIRGenFunction::emitComplexToScalarConversion(mlir::Value src,
2380 QualType srcTy,
2381 QualType dstTy,
2382 SourceLocation loc) {
2383 assert(srcTy->isAnyComplexType() && hasScalarEvaluationKind(dstTy) &&
2384 "Invalid complex -> scalar conversion");
2385
2386 QualType complexElemTy = srcTy->castAs<ComplexType>()->getElementType();
2387 if (dstTy->isBooleanType()) {
2388 auto kind = complexElemTy->isFloatingType()
2389 ? cir::CastKind::float_complex_to_bool
2390 : cir::CastKind::int_complex_to_bool;
2391 return builder.createCast(getLoc(loc), kind, src, convertType(dstTy));
2392 }
2393
2394 auto kind = complexElemTy->isFloatingType()
2395 ? cir::CastKind::float_complex_to_real
2396 : cir::CastKind::int_complex_to_real;
2397 mlir::Value real =
2398 builder.createCast(getLoc(loc), kind, src, convertType(complexElemTy));
2399 return emitScalarConversion(real, complexElemTy, dstTy, loc);
2400}
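// Illustrative sketch (editorial addition, not part of this file): the two
// complex-to-scalar conversions handled above, written in C.
//
//   double _Complex z = 1.0 + 2.0 * I; // I is from <complex.h>
//   double re = z;  // complex -> real: takes the real part, then converts
//   _Bool nz = z;   // complex -> bool: true if either component is nonzero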
2401
2402mlir::Value ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *e) {
2403 // Perform vector logical not on comparison with zero vector.
2404 if (e->getType()->isVectorType() &&
2405 e->getType()->castAs<VectorType>()->getVectorKind() ==
2406 VectorKind::Generic) {
2407 mlir::Value oper = Visit(e->getSubExpr());
2408 mlir::Location loc = cgf.getLoc(e->getExprLoc());
2409 auto operVecTy = mlir::cast<cir::VectorType>(oper.getType());
2410 auto exprVecTy = mlir::cast<cir::VectorType>(cgf.convertType(e->getType()));
2411 mlir::Value zeroVec = builder.getNullValue(operVecTy, loc);
2412 return cir::VecCmpOp::create(builder, loc, exprVecTy, cir::CmpOpKind::eq,
2413 oper, zeroVec);
2414 }
2415
2416 // Compare operand to zero.
2417 mlir::Value boolVal = cgf.evaluateExprAsBool(e->getSubExpr());
2418
2419 // Invert value.
2420 boolVal = builder.createNot(boolVal);
2421
2422 // ZExt result to the expr type.
2423 return maybePromoteBoolResult(boolVal, cgf.convertType(e->getType()));
2424}
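// Illustrative sketch (editorial addition, not part of this file): logical '!'
// on a GCC-style generic vector compares each lane against zero, as handled at
// the top of VisitUnaryLNot.
//
//   typedef int vec4 __attribute__((vector_size(16)));
//   vec4 v = {0, 7, 0, 9};
//   vec4 m = !v; // per-lane (lane == 0): true lanes become -1, false lanes 0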
2425
2426mlir::Value ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *e) {
2427 // Try folding the offsetof to a constant.
2428 Expr::EvalResult evalResult;
2429 if (e->EvaluateAsInt(evalResult, cgf.getContext())) {
2430 mlir::Type type = cgf.convertType(e->getType());
2431 llvm::APSInt value = evalResult.Val.getInt();
2432 return builder.getConstAPInt(cgf.getLoc(e->getExprLoc()), type, value);
2433 }
2434
2435 cgf.cgm.errorNYI(
2436 e->getSourceRange(),
2437 "ScalarExprEmitter::VisitOffsetOfExpr Can't eval expr as int");
2438 return {};
2439}
2440
2441mlir::Value ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *e) {
2442 QualType promotionTy = getPromotionType(e->getSubExpr()->getType());
2443 mlir::Value result = VisitRealImag(e, promotionTy);
2444 if (result && !promotionTy.isNull())
2445 result = emitUnPromotedValue(result, e->getType());
2446 return result;
2447}
2448
2449mlir::Value ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *e) {
2450 QualType promotionTy = getPromotionType(e->getSubExpr()->getType());
2451 mlir::Value result = VisitRealImag(e, promotionTy);
2452 if (result && !promotionTy.isNull())
2453 result = emitUnPromotedValue(result, e->getType());
2454 return result;
2455}
2456
2457mlir::Value ScalarExprEmitter::VisitRealImag(const UnaryOperator *e,
2458 QualType promotionTy) {
2459 assert((e->getOpcode() == clang::UO_Real ||
2460 e->getOpcode() == clang::UO_Imag) &&
2461 "Invalid UnaryOp kind for ComplexType Real or Imag");
2462
2463 Expr *op = e->getSubExpr();
2464 mlir::Location loc = cgf.getLoc(e->getExprLoc());
2465 if (op->getType()->isAnyComplexType()) {
2466 // If it's an l-value, load through the appropriate subobject l-value.
2467 // Note that we have to ask `e` because `op` might be an l-value that
2468 // this won't work for, e.g. an Obj-C property
2469 mlir::Value complex = cgf.emitComplexExpr(op);
2470 if (e->isGLValue() && !promotionTy.isNull()) {
2471 promotionTy = promotionTy->isAnyComplexType()
2472 ? promotionTy
2473 : cgf.getContext().getComplexType(promotionTy);
2474 complex = cgf.emitPromotedValue(complex, promotionTy);
2475 }
2476
2477 return e->getOpcode() == clang::UO_Real
2478 ? builder.createComplexReal(loc, complex)
2479 : builder.createComplexImag(loc, complex);
2480 }
2481
2482 if (e->getOpcode() == UO_Real) {
2483 mlir::Value operand = promotionTy.isNull()
2484 ? Visit(op)
2485 : cgf.emitPromotedScalarExpr(op, promotionTy);
2486 return builder.createComplexReal(loc, operand);
2487 }
2488
2489 // __imag on a scalar returns zero. Emit the subexpr to ensure side
2490 // effects are evaluated, but not the actual value.
2491 mlir::Value operand;
2492 if (op->isGLValue()) {
2493 operand = cgf.emitLValue(op).getPointer();
2494 operand = cir::LoadOp::create(builder, loc, operand);
2495 } else if (!promotionTy.isNull()) {
2496 operand = cgf.emitPromotedScalarExpr(op, promotionTy);
2497 } else {
2498 operand = cgf.emitScalarExpr(op);
2499 }
2500 return builder.createComplexImag(loc, operand);
2501}
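// Illustrative sketch (editorial addition, not part of this file): __real__ and
// __imag__ applied to a non-complex operand, as handled by the scalar paths at
// the end of VisitRealImag.
//
//   double d = 3.0;
//   double re = __real__ d; // 3.0 (the operand itself)
//   double im = __imag__ d; // 0.0 (operand evaluated only for side effects)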
2502
2503/// Return the size or alignment of the type of argument of the sizeof
2504/// expression as an integer.
2505mlir::Value ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
2506 const UnaryExprOrTypeTraitExpr *e) {
2507 const QualType typeToSize = e->getTypeOfArgument();
2508 const mlir::Location loc = cgf.getLoc(e->getSourceRange());
2509 if (auto kind = e->getKind();
2510 kind == UETT_SizeOf || kind == UETT_DataSizeOf || kind == UETT_CountOf) {
2511 if (const VariableArrayType *vat =
2512 cgf.getContext().getAsVariableArrayType(typeToSize)) {
2513 // For _Countof, we only want to evaluate if the extent is actually
2514 // variable as opposed to a multi-dimensional array whose extent is
2515 // constant but whose element type is variable.
2516 bool evaluateExtent = true;
2517 if (kind == UETT_CountOf && vat->getElementType()->isArrayType()) {
2518 evaluateExtent =
2519 !vat->getSizeExpr()->isIntegerConstantExpr(cgf.getContext());
2520 }
2521
2522 if (evaluateExtent) {
2523 if (e->isArgumentType()) {
2524 // sizeof(type) - make sure to emit the VLA size.
2525 cgf.emitVariablyModifiedType(typeToSize);
2526 } else {
2527 // C99 6.5.3.4p2: If the argument is an expression of type
2528 // VLA, it is evaluated.
2529 cgf.emitIgnoredExpr(e->getArgumentExpr());
2530 }
2531
2532 // For _Countof, we just want to return the size of a single dimension.
2533 if (kind == UETT_CountOf)
2534 return cgf.getVLAElements1D(vat).numElts;
2535
2536 // For sizeof and __datasizeof, we need to scale the number of elements
2537 // by the size of the array element type.
2538 CIRGenFunction::VlaSizePair vlaSize = cgf.getVLASize(vat);
2539 mlir::Value numElts = vlaSize.numElts;
2540
2541 // Scale the number of non-VLA elements by the non-VLA element size.
2542 CharUnits eltSize = cgf.getContext().getTypeSizeInChars(vlaSize.type);
2543 if (!eltSize.isOne()) {
2544 mlir::Location loc = cgf.getLoc(e->getSourceRange());
2545 mlir::Value eltSizeValue =
2546 builder.getConstAPInt(numElts.getLoc(), numElts.getType(),
2547 cgf.cgm.getSize(eltSize).getValue());
2548 return builder.createMul(loc, eltSizeValue, numElts,
2550 }
2551
2552 return numElts;
2553 }
2554 }
2555 } else if (e->getKind() == UETT_OpenMPRequiredSimdAlign) {
2556 cgf.cgm.errorNYI(
2557 e->getSourceRange(), "sizeof operator for OpenMpRequiredSimdAlign",
2558 e->getStmtClassName());
2559 return builder.getConstant(
2560 loc, cir::IntAttr::get(cgf.cgm.uInt64Ty,
2561 llvm::APSInt(llvm::APInt(64, 1), true)));
2562 }
2563
2564 return builder.getConstant(
2565 loc, cir::IntAttr::get(cgf.cgm.uInt64Ty,
2566 e->EvaluateKnownConstInt(cgf.getContext())));
2567}
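// Illustrative sketch (editorial addition, not part of this file): sizeof on a
// variable-length array is not a constant; the element count comes from the VLA
// size computed at run time and is scaled by the element size, as above.
//
//   void f(int n) {
//     int vla[n];
//     unsigned long bytes = sizeof vla; // n * sizeof(int), computed at run time
//   }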
2568
2569/// Return true if the specified expression is cheap enough and side-effect-free
2570/// enough to evaluate unconditionally instead of conditionally. This is used
2571/// to convert control flow into selects in some cases.
2572/// TODO(cir): can be shared with LLVM codegen.
2573static bool isCheapEnoughToEvaluateUnconditionally(const Expr *e,
2574 CIRGenFunction &cgf) {
2575 // Anything that is an integer or floating point constant is fine.
2576 return e->IgnoreParens()->isEvaluatable(cgf.getContext());
2577
2578 // Even non-volatile automatic variables can't be evaluated unconditionally.
2579 // Referencing a thread_local may cause non-trivial initialization work to
2580 // occur. If we're inside a lambda and one of the variables is from the scope
2581 // outside the lambda, that function may have returned already. Reading its
2582 // locals is a bad idea. Also, these reads may introduce races that didn't
2583 // exist in the source-level program.
2584}
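// Illustrative sketch (editorial addition, not part of this file): when both
// arms are constant and side-effect free, the conditional below is emitted as a
// select on the condition rather than as control flow.
//
//   int pick(bool b) { return b ? 4 : 5; }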
2585
2586mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator(
2587 const AbstractConditionalOperator *e) {
2588 CIRGenBuilderTy &builder = cgf.getBuilder();
2589 mlir::Location loc = cgf.getLoc(e->getSourceRange());
2590 ignoreResultAssign = false;
2591
2592 // Bind the common expression if necessary.
2593 CIRGenFunction::OpaqueValueMapping binding(cgf, e);
2594
2595 Expr *condExpr = e->getCond();
2596 Expr *lhsExpr = e->getTrueExpr();
2597 Expr *rhsExpr = e->getFalseExpr();
2598
2599 // If the condition constant folds and can be elided, try to avoid emitting
2600 // the condition and the dead arm.
2601 bool condExprBool;
2602 if (cgf.constantFoldsToBool(condExpr, condExprBool)) {
2603 Expr *live = lhsExpr, *dead = rhsExpr;
2604 if (!condExprBool)
2605 std::swap(live, dead);
2606
2607 // If the dead side doesn't have labels we need, just emit the Live part.
2608 if (!cgf.containsLabel(dead)) {
2609 if (condExprBool)
2611 mlir::Value result = Visit(live);
2612
2613 // If the live part is a throw expression, it acts like it has a void
2614 // type, so evaluating it returns a null Value. However, a conditional
2615 // with non-void type must return a non-null Value.
2616 if (!result && !e->getType()->isVoidType()) {
2617 result = builder.getConstant(
2618 loc, cir::PoisonAttr::get(builder.getContext(),
2619 cgf.convertType(e->getType())));
2620 }
2621
2622 return result;
2623 }
2624 }
2625
2626 QualType condType = condExpr->getType();
2627
2628 // OpenCL: If the condition is a vector, we can treat this condition like
2629 // the select function.
2630 if ((cgf.getLangOpts().OpenCL && condType->isVectorType()) ||
2631 condType->isExtVectorType()) {
2633 cgf.cgm.errorNYI(e->getSourceRange(), "vector ternary op");
2634 }
2635
2636 if (condType->isVectorType() || condType->isSveVLSBuiltinType()) {
2637 if (!condType->isVectorType()) {
2639 cgf.cgm.errorNYI(loc, "TernaryOp for SVE vector");
2640 return {};
2641 }
2642
2643 mlir::Value condValue = Visit(condExpr);
2644 mlir::Value lhsValue = Visit(lhsExpr);
2645 mlir::Value rhsValue = Visit(rhsExpr);
2646 return cir::VecTernaryOp::create(builder, loc, condValue, lhsValue,
2647 rhsValue);
2648 }
2649
2650 // If this is a really simple expression (like x ? 4 : 5), emit this as a
2651 // select instead of as control flow. We can only do this if it is cheap
2652 // and safe to evaluate the LHS and RHS unconditionally.
2653 if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, cgf) &&
2654 isCheapEnoughToEvaluateUnconditionally(rhsExpr, cgf)) {
2655 bool lhsIsVoid = false;
2656 mlir::Value condV = cgf.evaluateExprAsBool(condExpr);
2658
2659 mlir::Value lhs = Visit(lhsExpr);
2660 if (!lhs) {
2661 lhs = builder.getNullValue(cgf.voidTy, loc);
2662 lhsIsVoid = true;
2663 }
2664
2665 mlir::Value rhs = Visit(rhsExpr);
2666 if (lhsIsVoid) {
2667 assert(!rhs && "lhs and rhs types must match");
2668 rhs = builder.getNullValue(cgf.voidTy, loc);
2669 }
2670
2671 return builder.createSelect(loc, condV, lhs, rhs);
2672 }
2673
2674 mlir::Value condV = cgf.emitOpOnBoolExpr(loc, condExpr);
2675 CIRGenFunction::ConditionalEvaluation eval(cgf);
2676 SmallVector<mlir::OpBuilder::InsertPoint, 2> insertPoints{};
2677 mlir::Type yieldTy{};
2678
2679 auto emitBranch = [&](mlir::OpBuilder &b, mlir::Location loc, Expr *expr) {
2680 CIRGenFunction::LexicalScope lexScope{cgf, loc, b.getInsertionBlock()};
2682
2684 eval.beginEvaluation();
2685 mlir::Value branch = Visit(expr);
2686 eval.endEvaluation();
2687
2688 if (branch) {
2689 yieldTy = branch.getType();
2690 cir::YieldOp::create(b, loc, branch);
2691 } else {
2692 // If LHS or RHS is a throw or void expression we need to patch
2693 // the arms so that the yield types match properly.
2694 insertPoints.push_back(b.saveInsertionPoint());
2695 }
2696 };
2697
2698 mlir::Value result = cir::TernaryOp::create(
2699 builder, loc, condV,
2700 /*trueBuilder=*/
2701 [&](mlir::OpBuilder &b, mlir::Location loc) {
2702 emitBranch(b, loc, lhsExpr);
2703 },
2704 /*falseBuilder=*/
2705 [&](mlir::OpBuilder &b, mlir::Location loc) {
2706 emitBranch(b, loc, rhsExpr);
2707 })
2708 .getResult();
2709
2710 if (!insertPoints.empty()) {
2711 // If both arms are void, so be it.
2712 if (!yieldTy)
2713 yieldTy = cgf.voidTy;
2714
2715 // Insert required yields.
2716 for (mlir::OpBuilder::InsertPoint &toInsert : insertPoints) {
2717 mlir::OpBuilder::InsertionGuard guard(builder);
2718 builder.restoreInsertionPoint(toInsert);
2719
2720 // Block does not return: build empty yield.
2721 if (mlir::isa<cir::VoidType>(yieldTy)) {
2722 cir::YieldOp::create(builder, loc);
2723 } else { // Block returns: set null yield value.
2724 mlir::Value op0 = builder.getNullValue(yieldTy, loc);
2725 cir::YieldOp::create(builder, loc, op0);
2726 }
2727 }
2728 }
2729
2730 return result;
2731}
2732
2733mlir::Value CIRGenFunction::emitScalarPrePostIncDec(const UnaryOperator *e,
2734 LValue lv,
2735 cir::UnaryOpKind kind,
2736 bool isPre) {
2737 return ScalarExprEmitter(*this, builder)
2738 .emitScalarPrePostIncDec(e, lv, kind, isPre);
2739}
#define HANDLE_BINOP(OP)
static bool mustVisitNullValue(const Expr *e)
#define COMPOUND_OP(Op)
#define HANDLEBINOP(OP)
static bool isWidenedIntegerOp(const ASTContext &astContext, const Expr *e)
Check if e is a widened promoted integer.
static mlir::Value emitPointerArithmetic(CIRGenFunction &cgf, const BinOpInfo &op, bool isSubtraction)
Emit pointer + index arithmetic.
static bool isCheapEnoughToEvaluateUnconditionally(const Expr *e, CIRGenFunction &cgf)
Return true if the specified expression is cheap enough and side-effect-free enough to evaluate uncon...
static bool canElideOverflowCheck(const ASTContext &astContext, const BinOpInfo &op)
Check if we can skip the overflow check for Op.
static std::optional< QualType > getUnwidenedIntegerType(const ASTContext &astContext, const Expr *e)
If e is a widened promoted integer, get its base (unpromoted) type.
#define VISITCOMP(CODE)
__device__ __2f16 b
__device__ __2f16 float __ockl_bool s
cir::ConstantOp getBool(bool state, mlir::Location loc)
cir::ConstantOp getNullValue(mlir::Type ty, mlir::Location loc)
cir::ConstantOp getConstant(mlir::Location loc, mlir::TypedAttr attr)
mlir::Value createCast(mlir::Location loc, cir::CastKind kind, mlir::Value src, mlir::Type newTy)
mlir::Value createIntToPtr(mlir::Value src, mlir::Type newTy)
mlir::Value createBitcast(mlir::Value src, mlir::Type newTy)
cir::CmpOp createCompare(mlir::Location loc, cir::CmpOpKind kind, mlir::Value lhs, mlir::Value rhs)
mlir::Value createSelect(mlir::Location loc, mlir::Value condition, mlir::Value trueValue, mlir::Value falseValue)
mlir::Type getIntPtrType(mlir::Type ty) const
llvm::APInt getValue() const
APSInt & getInt()
Definition APValue.h:489
bool isNullPointer() const
Definition APValue.cpp:1019
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:220
CanQualType FloatTy
QualType getVectorType(QualType VectorType, unsigned NumElts, VectorKind VecKind) const
Return the unique reference to a vector type of the specified element type and size.
CanQualType BoolTy
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
const VariableArrayType * getAsVariableArrayType(QualType T) const
QualType getComplexType(QualType T) const
Return the uniqued reference to the type for a complex number with the specified element type.
bool isPromotableIntegerType(QualType T) const
More type predicates useful for type checking/promotion.
static bool hasSameUnqualifiedType(QualType T1, QualType T2)
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
Expr * getCond() const
getCond - Return the expression representing the condition for the ?
Definition Expr.h:4465
Expr * getTrueExpr() const
getTrueExpr - Return the subexpression representing the value of the expression if the condition eval...
Definition Expr.h:4471
Expr * getFalseExpr() const
getFalseExpr - Return the subexpression representing the value of the expression if the condition eva...
Definition Expr.h:4477
LabelDecl * getLabel() const
Definition Expr.h:4507
uint64_t getValue() const
Definition ExprCXX.h:3044
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:3972
Expr * getLHS() const
Definition Expr.h:4022
SourceLocation getExprLoc() const
Definition Expr.h:4013
Expr * getRHS() const
Definition Expr.h:4024
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const
Get the FP features status of this operator.
Definition Expr.h:4185
static bool isNullPointerArithmeticExtension(ASTContext &Ctx, Opcode Opc, const Expr *LHS, const Expr *RHS)
Return true if a binary operator using the specified opcode and operands would match the 'p = (i8*)nu...
Definition Expr.cpp:2204
Opcode getOpcode() const
Definition Expr.h:4017
BinaryOperatorKind Opcode
Definition Expr.h:3977
mlir::Value getPointer() const
Definition Address.h:90
mlir::Value createNeg(mlir::Value value)
void forceCleanup()
Force the emission of cleanups now, instead of waiting until this object is destroyed.
static bool hasScalarEvaluationKind(clang::QualType type)
mlir::Value emitComplexToScalarConversion(mlir::Value src, QualType srcTy, QualType dstTy, SourceLocation loc)
Emit a conversion from the specified complex type to the specified destination type,...
mlir::Type convertType(clang::QualType t)
mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType)
Address emitPointerWithAlignment(const clang::Expr *expr, LValueBaseInfo *baseInfo=nullptr)
Given an expression with a pointer type, emit the value and compute our best estimate of the alignmen...
void emitVariablyModifiedType(QualType ty)
const clang::LangOptions & getLangOpts() const
VlaSizePair getVLASize(const VariableArrayType *type)
Returns an MLIR::Value+QualType pair that corresponds to the size, in non-variably-sized elements,...
LValue emitScalarCompoundAssignWithComplex(const CompoundAssignOperator *e, mlir::Value &result)
mlir::Value emitComplexExpr(const Expr *e)
Emit the computation of the specified expression of complex type, returning the result.
RValue emitCallExpr(const clang::CallExpr *e, ReturnValueSlot returnValue=ReturnValueSlot())
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
mlir::Value evaluateExprAsBool(const clang::Expr *e)
Perform the usual unary conversions on the specified expression and compare the result against zero,...
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
bool constantFoldsToBool(const clang::Expr *cond, bool &resultBool, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does but contains a label,...
mlir::Value emitOpOnBoolExpr(mlir::Location loc, const clang::Expr *cond)
TODO(cir): see EmitBranchOnBoolExpr for extra ideas).
VlaSizePair getVLAElements1D(const VariableArrayType *vla)
Return the number of elements for a single dimension for the given array type.
mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv, cir::UnaryOpKind kind, bool isPre)
mlir::Value emitScalarConversion(mlir::Value src, clang::QualType srcType, clang::QualType dstType, clang::SourceLocation loc)
Emit a conversion from the specified type to the specified destination type, both of which are CIR sc...
Address getAddressOfDerivedClass(mlir::Location loc, Address baseAddr, const CXXRecordDecl *derived, llvm::iterator_range< CastExpr::path_const_iterator > path, bool nullCheckValue)
clang::SanitizerSet sanOpts
Sanitizers enabled for this function.
mlir::Value createDummyValue(mlir::Location loc, clang::QualType qt)
LValue emitCompoundAssignmentLValue(const clang::CompoundAssignOperator *e)
mlir::Value getAsNaturalPointerTo(Address addr, QualType pointeeType)
mlir::Value emitScalarExpr(const clang::Expr *e, bool ignoreResultAssign=false)
Emit the computation of the specified expression of scalar type.
mlir::Value emitPromotedScalarExpr(const Expr *e, QualType promotionType)
bool shouldNullCheckClassCastValue(const CastExpr *ce)
CIRGenBuilderTy & getBuilder()
mlir::MLIRContext & getMLIRContext()
bool containsLabel(const clang::Stmt *s, bool ignoreCaseStmts=false)
Return true if the statement contains a label in it.
Address emitArrayToPointerDecay(const Expr *e, LValueBaseInfo *baseInfo=nullptr)
mlir::Value emitStoreThroughBitfieldLValue(RValue src, LValue dstresult)
clang::ASTContext & getContext() const
void emitNullabilityCheck(LValue lhs, mlir::Value rhs, clang::SourceLocation loc)
Given an assignment *lhs = rhs, emit a test that checks if rhs is nonnull, if 1LHS is marked _Nonnull...
void emitStoreThroughLValue(RValue src, LValue dst, bool isInit=false)
Store the specified rvalue into the specified lvalue, where both are guaranteed to the have the same ...
void emitIgnoredExpr(const clang::Expr *e)
Emit code to compute the specified expression, ignoring the result.
mlir::Value emitDynamicCast(Address thisAddr, const CXXDynamicCastExpr *dce)
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
mlir::IntegerAttr getSize(CharUnits size)
const cir::CIRDataLayout getDataLayout() const
const clang::CodeGenOptions & getCodeGenOpts() const
const TargetCIRGenInfo & getTargetCIRGenInfo()
mlir::Value emitNullConstant(QualType t, mlir::Location loc)
Return the result of value-initializing the given type, i.e.
mlir::Value getPointer() const
static RValue get(mlir::Value v)
Definition CIRGenValue.h:83
mlir::Value getValue() const
Return the value of this scalar value.
Definition CIRGenValue.h:57
virtual mlir::Value performAddrSpaceCast(CIRGenFunction &cgf, mlir::Value v, cir::TargetAddressSpaceAttr srcAddr, mlir::Type destTy, bool isNonNull=false) const
Perform address space cast of an expression of pointer type.
bool getValue() const
Definition ExprCXX.h:740
Expr * getExpr()
Get the initialization expression that will be used.
Definition ExprCXX.cpp:1105
bool getValue() const
Definition ExprCXX.h:4332
Expr * getSemanticForm()
Get an equivalent semantic form for this expression.
Definition ExprCXX.h:304
QualType getCallReturnType(const ASTContext &Ctx) const
getCallReturnType - Get the return type of the call expr.
Definition Expr.cpp:1602
CastKind getCastKind() const
Definition Expr.h:3654
llvm::iterator_range< path_iterator > path()
Path through the class hierarchy taken by casts between base and derived classes (see implementation ...
Definition Expr.h:3697
static const char * getCastKindName(CastKind CK)
Definition Expr.cpp:1950
Expr * getSubExpr()
Definition Expr.h:3660
bool isOne() const
isOne - Test whether the quantity equals one.
Definition CharUnits.h:125
unsigned getValue() const
Definition Expr.h:1629
Expr * getChosenSubExpr() const
getChosenSubExpr - Return the subexpression chosen according to the condition.
Definition Expr.h:4818
Complex values, per C99 6.2.5p11.
Definition TypeBase.h:3276
CompoundAssignOperator - For compound assignments (e.g.
Definition Expr.h:4234
QualType getComputationLHSType() const
Definition Expr.h:4268
QualType getComputationResultType() const
Definition Expr.h:4271
SourceLocation getExprLoc() const LLVM_READONLY
bool isSatisfied() const
Whether or not the concept with the given arguments was satisfied when the expression was created.
Expr * getSrcExpr() const
getSrcExpr - Return the Expr to be converted.
Definition Expr.h:4743
ChildElementIter< false > begin()
Definition Expr.h:5166
size_t getDataElementCount() const
Definition Expr.h:5082
This represents one expression.
Definition Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
bool isGLValue() const
Definition Expr.h:287
@ SE_AllowSideEffects
Allow any unmodeled side effect.
Definition Expr.h:674
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3085
bool isEvaluatable(const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects) const
isEvaluatable - Call EvaluateAsRValue to see if this expression can be constant folded without side-e...
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3069
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:276
QualType getType() const
Definition Expr.h:144
llvm::APFloat getValue() const
Definition Expr.h:1666
const Expr * getSubExpr() const
Definition Expr.h:1062
Expr * getResultExpr()
Return the result expression of this controlling expression.
Definition Expr.h:6396
unsigned getNumInits() const
Definition Expr.h:5263
bool hadArrayRangeDesignator() const
Definition Expr.h:5417
const Expr * getInit(unsigned Init) const
Definition Expr.h:5287
ArrayRef< Expr * > inits()
Definition Expr.h:5283
bool isSignedOverflowDefined() const
Expr * getBase() const
Definition Expr.h:3375
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:3493
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition TypeBase.h:3654
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:301
SourceRange getSourceRange() const LLVM_READONLY
Definition ExprObjC.h:218
SourceRange getSourceRange() const
Definition ExprObjC.h:1719
SourceRange getSourceRange() const LLVM_READONLY
Definition ExprObjC.h:162
SourceRange getSourceRange() const LLVM_READONLY
Definition ExprObjC.h:381
OffsetOfExpr - [C99 7.17] - This represents an expression of the form offsetof(record-type,...
Definition Expr.h:2527
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:1208
Expr * getSelectedExpr() const
Definition ExprCXX.h:4639
const Expr * getSubExpr() const
Definition Expr.h:2199
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3329
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
Definition TypeBase.h:8293
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8419
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns lifetime attribute of this type.
Definition TypeBase.h:1438
QualType getCanonicalType() const
Definition TypeBase.h:8345
bool UseExcessPrecision(const ASTContext &Ctx)
Definition Type.cpp:1612
bool isCanonical() const
Definition TypeBase.h:8350
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
Definition TypeBase.h:361
@ OCL_ExplicitNone
This object can be modified without requiring retains or releases.
Definition TypeBase.h:354
@ OCL_None
There is no lifetime qualification on this type.
Definition TypeBase.h:350
@ OCL_Weak
Reading or writing from this object requires a barrier call.
Definition TypeBase.h:364
@ OCL_Autoreleasing
Assigning into this object requires a lifetime extension.
Definition TypeBase.h:367
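A sketch of branching on the ARC lifetime of a type, assuming ty is a QualType (the variable name is illustrative, not from this file):

  switch (ty.getObjCLifetime()) {
  case Qualifiers::OCL_None:
  case Qualifiers::OCL_ExplicitNone:
    break; // no retain/release bookkeeping needed
  case Qualifiers::OCL_Strong:
  case Qualifiers::OCL_Weak:
  case Qualifiers::OCL_Autoreleasing:
    // ARC-managed assignments need dedicated handling
    break;
  }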
bool isSatisfied() const
Whether or not the requires clause is satisfied.
unsigned getNumSubExprs() const
getNumSubExprs - Return the size of the SubExprs array.
Definition Expr.h:4610
Expr * getExpr(unsigned Index)
getExpr - Return the Expr at the specified index.
Definition Expr.h:4616
APValue EvaluateInContext(const ASTContext &Ctx, const Expr *DefaultExpr) const
Return the result of evaluating this SourceLocExpr in the specified (and possibly null) default argum...
Definition Expr.cpp:2281
SourceLocation getLocation() const
Definition Expr.h:4995
Encodes a location in the source.
SourceLocation getBegin() const
CompoundStmt * getSubStmt()
Definition Expr.h:4546
StmtVisitor - This class implements a simple visitor for Stmt subclasses.
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:338
const char * getStmtClassName() const
Definition Stmt.cpp:87
bool getBoolValue() const
Definition ExprCXX.h:2947
bool isStoredAsBoolean() const
Definition ExprCXX.h:2943
bool isVoidType() const
Definition TypeBase.h:8892
bool isBooleanType() const
Definition TypeBase.h:9022
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration type whose underlying ty...
Definition Type.cpp:2225
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char,...
Definition Type.cpp:2205
bool isConstantMatrixType() const
Definition TypeBase.h:8697
bool isPointerType() const
Definition TypeBase.h:8530
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition TypeBase.h:8936
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9179
bool isReferenceType() const
Definition TypeBase.h:8554
const CXXRecordDecl * getPointeeCXXRecordDecl() const
If this is a pointer or reference to a RecordType, return the CXXRecordDecl that the type refers to.
Definition Type.cpp:1909
bool isSveVLSBuiltinType() const
Determines if this is a sizeless type supported by the 'arm_sve_vector_bits' type attribute,...
Definition Type.cpp:2607
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:752
bool hasUnsignedIntegerRepresentation() const
Determine whether this type has an unsigned integer representation of some sort, e....
Definition Type.cpp:2291
bool isExtVectorType() const
Definition TypeBase.h:8673
bool isAnyComplexType() const
Definition TypeBase.h:8665
bool isFixedPointType() const
Return true if this is a fixed point type according to ISO/IEC JTC1 SC22 WG14 N1169.
Definition TypeBase.h:8948
bool isHalfType() const
Definition TypeBase.h:8896
bool hasSignedIntegerRepresentation() const
Determine whether this type has a signed integer representation of some sort, e.g....
Definition Type.cpp:2243
bool isMatrixType() const
Definition TypeBase.h:8693
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
Definition TypeBase.h:2801
bool isFunctionType() const
Definition TypeBase.h:8526
bool isVectorType() const
Definition TypeBase.h:8669
bool isRealFloatingType() const
Floating point categories.
Definition Type.cpp:2320
bool isFloatingType() const
Definition Type.cpp:2304
bool isUnsignedIntegerType() const
Return true if this is an integer type that is unsigned, according to C99 6.2.5p6 [which returns true...
Definition Type.cpp:2253
const T * getAs() const
Member-template getAs<specific type>.
Definition TypeBase.h:9112
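getAs<T>() and castAs<T>() differ in how they handle a mismatch: getAs returns null when the type is not a T (after desugaring), while castAs asserts that it is. A short sketch, assuming ty is a QualType:

  if (const auto *ptrTy = ty->getAs<PointerType>()) {
    QualType pointee = ptrTy->getPointeeType(); // safe: guarded by the null check
    (void)pointee;
  }
  const auto *knownPtr = ty->castAs<PointerType>(); // asserts ty really is a pointer type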
bool isNullPtrType() const
Definition TypeBase.h:8929
QualType getTypeOfArgument() const
Gets the argument type, or the type of the argument expression, whichever is appropriate.
Definition Expr.h:2694
UnaryExprOrTypeTrait getKind() const
Definition Expr.h:2657
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2244
SourceLocation getExprLoc() const
Definition Expr.h:2368
Expr * getSubExpr() const
Definition Expr.h:2285
Opcode getOpcode() const
Definition Expr.h:2280
static bool isIncrementOp(Opcode Op)
Definition Expr.h:2326
bool canOverflow() const
Returns true if the unary operator can cause an overflow.
Definition Expr.h:2298
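A sketch of inspecting a unary operator before emission, assuming unop is a const UnaryOperator * (the name is illustrative):

  if (UnaryOperator::isIncrementOp(unop->getOpcode())) {
    const Expr *operand = unop->getSubExpr();
    bool needsOverflowCheck = unop->canOverflow(); // whether the operation can overflow at all
    (void)operand;
    (void)needsOverflowCheck;
  }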
Represents a GCC generic vector type.
Definition TypeBase.h:4176
VectorKind getVectorKind() const
Definition TypeBase.h:4196
cir::TargetAddressSpaceAttr toCIRTargetAddressSpace(mlir::MLIRContext &context, clang::LangAS langAS)
Definition CIRTypes.cpp:836
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const internal::ArgumentAdaptingMatcherFunc< internal::HasMatcher > has
Matches AST nodes that have child AST nodes that match the provided matcher.
const AstTypeMatcher< PointerType > pointerType
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
unsigned kind
All of the diagnostics that can be emitted by the frontend.
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
bool isTargetAddressSpace(LangAS AS)
LangAS
Defines the address space values used by the address space qualifier of QualType.
CastKind
CastKind - The kind of operation required for a conversion.
@ Generic
not a target-specific vector type
Definition TypeBase.h:4137
U cast(CodeGen::Address addr)
Definition Address.h:327
#define false
Definition stdbool.h:26
static bool instrumentation()
static bool dataMemberType()
static bool objCLifetime()
static bool addressSpace()
static bool fixedPointType()
static bool vecTernaryOp()
static bool cgFPOptionsRAII()
static bool fpConstraints()
static bool addHeapAllocSiteMetadata()
static bool mayHaveIntegerOverflow()
static bool tryEmitAsConstant()
static bool llvmLoweringPtrDiffConsidersPointee()
static bool scalableVectors()
static bool emitLValueAlignmentAssumption()
static bool incrementProfileCounter()
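These static predicates appear to be the MissingFeatures markers used throughout ClangIR code generation; each simply returns false until the corresponding feature is implemented. A hedged sketch of the usual pattern, assuming they are members of cir::MissingFeatures:

  // Guard a code path that does not yet handle fixed-point types or the
  // FP-options RAII object; the asserts document the gap without changing behavior.
  assert(!cir::MissingFeatures::fixedPointType());
  assert(!cir::MissingFeatures::cgFPOptionsRAII());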
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:645
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:647
bool HasSideEffects
Whether the evaluated expression has side effects.
Definition Expr.h:612
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
Definition Sanitizers.h:174
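A short sketch of the typical query, assuming sanOpts is a clang::SanitizerSet (for example, the per-function sanitizer set in a code generator):

  if (sanOpts.has(SanitizerKind::SignedIntegerOverflow)) {
    // emit an explicit overflow check for this arithmetic operation
  }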