//===--- CIRGenAtomic.cpp - Emit CIR for atomic operations ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the code for emitting atomic operations.
//
//===----------------------------------------------------------------------===//

#include "CIRGenFunction.h"

using namespace clang;
using namespace clang::CIRGen;
using namespace cir;

namespace {
class AtomicInfo {
  CIRGenFunction &cgf;
  QualType atomicTy;
  QualType valueTy;
  uint64_t atomicSizeInBits = 0;
  uint64_t valueSizeInBits = 0;
  CharUnits atomicAlign;
  CharUnits valueAlign;
  TypeEvaluationKind evaluationKind = cir::TEK_Scalar;
  bool useLibCall = true;
  LValue lvalue;
  mlir::Location loc;

public:
  AtomicInfo(CIRGenFunction &cgf, LValue &lvalue, mlir::Location loc)
      : cgf(cgf), loc(loc) {
    assert(!lvalue.isGlobalReg());
    ASTContext &ctx = cgf.getContext();
    if (lvalue.isSimple()) {
      atomicTy = lvalue.getType();
      if (auto *ty = atomicTy->getAs<AtomicType>())
        valueTy = ty->getValueType();
      else
        valueTy = atomicTy;
      evaluationKind = cgf.getEvaluationKind(valueTy);

      TypeInfo valueTypeInfo = ctx.getTypeInfo(valueTy);
      TypeInfo atomicTypeInfo = ctx.getTypeInfo(atomicTy);
      uint64_t valueAlignInBits = valueTypeInfo.Align;
      uint64_t atomicAlignInBits = atomicTypeInfo.Align;
      valueSizeInBits = valueTypeInfo.Width;
      atomicSizeInBits = atomicTypeInfo.Width;
      assert(valueSizeInBits <= atomicSizeInBits);
      assert(valueAlignInBits <= atomicAlignInBits);

      atomicAlign = ctx.toCharUnitsFromBits(atomicAlignInBits);
      valueAlign = ctx.toCharUnitsFromBits(valueAlignInBits);
      if (lvalue.getAlignment().isZero())
        lvalue.setAlignment(atomicAlign);

      this->lvalue = lvalue;
    } else {
      cgf.cgm.errorNYI(loc, "AtomicInfo: non-simple lvalue");
    }
    useLibCall = !ctx.getTargetInfo().hasBuiltinAtomic(
        atomicSizeInBits, ctx.toBits(lvalue.getAlignment()));
  }

  QualType getValueType() const { return valueTy; }
  CharUnits getAtomicAlignment() const { return atomicAlign; }
  TypeEvaluationKind getEvaluationKind() const { return evaluationKind; }
  mlir::Value getAtomicPointer() const {
    if (lvalue.isSimple())
      return lvalue.getPointer();
    return nullptr;
  }
  bool shouldUseLibCall() const { return useLibCall; }
  const LValue &getAtomicLValue() const { return lvalue; }
  Address getAtomicAddress() const {
    mlir::Type elemTy;
    if (lvalue.isSimple()) {
      elemTy = lvalue.getAddress().getElementType();
    } else {
      cgf.cgm.errorNYI(loc, "AtomicInfo::getAtomicAddress: non-simple lvalue");
    }
    return Address(getAtomicPointer(), elemTy, getAtomicAlignment());
  }

  /// Is the atomic size larger than the underlying value type?
  ///
  /// Note that the absence of padding does not mean that atomic
  /// objects are completely interchangeable with non-atomic
  /// objects: we might have promoted the alignment of a type
  /// without making it bigger.
  bool hasPadding() const { return (valueSizeInBits != atomicSizeInBits); }
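  // Illustrative note (not part of the upstream file): padding shows up when
  // the atomic representation is wider than the value it wraps. On a typical
  // target, an _Atomic struct of three chars is promoted to a 4-byte,
  // 4-aligned object, so valueSizeInBits (24) differs from atomicSizeInBits
  // (32) and hasPadding() returns true.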

  bool emitMemSetZeroIfNecessary() const;

  mlir::Value getScalarRValValueOrNull(RValue rvalue) const;

  /// Cast the given pointer to an integer pointer suitable for atomic
  /// operations on the source.
  Address castToAtomicIntPointer(Address addr) const;

  /// If addr is compatible with the iN that will be used for an atomic
  /// operation, bitcast it. Otherwise, create a temporary that is suitable and
  /// copy the value across.
  Address convertToAtomicIntPointer(Address addr) const;

  /// Converts an rvalue to an integer value.
  mlir::Value convertRValueToInt(RValue rvalue, bool cmpxchg = false) const;

  /// Copy an atomic r-value into atomic-layout memory.
  void emitCopyIntoMemory(RValue rvalue) const;

  /// Project an l-value down to the value field.
  LValue projectValue() const {
    assert(lvalue.isSimple());
    Address addr = getAtomicAddress();
    if (hasPadding()) {
      cgf.cgm.errorNYI(loc, "AtomicInfo::projectValue: padding");
    }

    return LValue::makeAddr(addr, getValueType(), lvalue.getBaseInfo());
  }

  /// Creates a temp alloca for intermediate operations on the atomic value.
  Address createTempAlloca() const;

private:
  bool requiresMemSetZero(mlir::Type ty) const;
};
} // namespace

// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
static Address emitValToTemp(CIRGenFunction &cgf, Expr *e) {
  Address declPtr = cgf.createMemTemp(
      e->getType(), cgf.getLoc(e->getSourceRange()), ".atomictmp");
  cgf.emitAnyExprToMem(e, declPtr, e->getType().getQualifiers(),
                       /*Init*/ true);
  return declPtr;
}
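// Illustrative example (not part of the upstream file): for a call such as
// __atomic_store_n(p, 42, __ATOMIC_SEQ_CST), the value operand 42 is first
// materialized into a ".atomictmp" alloca by emitValToTemp, and the atomic
// store later reads it back from that temporary.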

/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CIRGenModule &cgm, mlir::Type ty,
                           uint64_t expectedSize) {
  return cgm.getDataLayout().getTypeStoreSize(ty) * 8 == expectedSize;
}

/// Does the atomic type require memsetting to zero before initialization?
///
/// The IR type is provided as a way of making certain queries faster.
bool AtomicInfo::requiresMemSetZero(mlir::Type ty) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding())
    return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the
  // type uses the full size.
  case cir::TEK_Scalar:
    return !isFullSizeType(cgf.cgm, ty, atomicSizeInBits);
  case cir::TEK_Complex:
    return !isFullSizeType(cgf.cgm,
                           mlir::cast<cir::ComplexType>(ty).getElementType(),
                           atomicSizeInBits / 2);
  // Padding in structs has an undefined bit pattern. User beware.
  case cir::TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}
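// Illustrative example (not part of the upstream file): for
// _Atomic(_Complex float), the atomic width is 64 bits and each 32-bit
// element stores its full 32-bit half, so no zeroing memset is needed. A
// padded type such as the 3-byte struct mentioned above fails the
// hasPadding() check first and does require one.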

Address AtomicInfo::convertToAtomicIntPointer(Address addr) const {
  mlir::Type ty = addr.getElementType();
  uint64_t sourceSizeInBits = cgf.cgm.getDataLayout().getTypeSizeInBits(ty);
  if (sourceSizeInBits != atomicSizeInBits) {
    cgf.cgm.errorNYI(
        loc,
        "AtomicInfo::convertToAtomicIntPointer: convert through temp alloca");
  }

  return castToAtomicIntPointer(addr);
}

Address AtomicInfo::createTempAlloca() const {
  Address tempAlloca = cgf.createMemTemp(
      (lvalue.isBitField() && valueSizeInBits > atomicSizeInBits) ? valueTy
                                                                  : atomicTy,
      getAtomicAlignment(), loc, "atomic-temp");

  // Cast to pointer to value type for bitfields.
  if (lvalue.isBitField()) {
    cgf.cgm.errorNYI(loc, "AtomicInfo::createTempAlloca: bitfield lvalue");
  }

  return tempAlloca;
}

mlir::Value AtomicInfo::getScalarRValValueOrNull(RValue rvalue) const {
  if (rvalue.isScalar() && (!hasPadding() || !lvalue.isSimple()))
    return rvalue.getValue();
  return nullptr;
}

Address AtomicInfo::castToAtomicIntPointer(Address addr) const {
  auto intTy = mlir::dyn_cast<cir::IntType>(addr.getElementType());
  // Don't bother with int casts if the integer size is the same.
  if (intTy && intTy.getWidth() == atomicSizeInBits)
    return addr;
  auto ty = cgf.getBuilder().getUIntNTy(atomicSizeInBits);
  return addr.withElementType(cgf.getBuilder(), ty);
}

bool AtomicInfo::emitMemSetZeroIfNecessary() const {
  assert(lvalue.isSimple());
  Address addr = lvalue.getAddress();
  if (!requiresMemSetZero(addr.getElementType()))
    return false;

  cgf.cgm.errorNYI(loc,
                   "AtomicInfo::emitMemSetZeroIfNecessary: emit memset zero");
  return false;
}

/// Return true if \param valueTy is a type that should be cast to an integer
/// around the atomic memory operation. If \param cmpxchg is true, then the
/// cast of a floating point type is made, as that instruction cannot have
/// floating point operands. TODO: Allow compare-and-exchange and FP - see
/// comment in CIRGenAtomicExpandPass.cpp.
static bool shouldCastToInt(mlir::Type valueTy, bool cmpxchg) {
  if (cir::isAnyFloatingPointType(valueTy))
    return isa<cir::FP80Type>(valueTy) || cmpxchg;
  return !isa<cir::IntType>(valueTy) && !isa<cir::PointerType>(valueTy);
}
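// Illustrative examples (not part of the upstream file): with cmpxchg false,
// a 32-bit float is left as a float for AtomicExpandPass to handle, while an
// x86 long double (FP80) is always routed through an integer of the atomic
// width; integer and pointer values never need the cast. With cmpxchg true,
// every floating-point type is cast, since compare-and-exchange cannot take
// floating-point operands.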

mlir::Value AtomicInfo::convertRValueToInt(RValue rvalue, bool cmpxchg) const {
  // If we've got a scalar value of the right size, try to avoid going
  // through memory. Floats get cast if needed by AtomicExpandPass.
  if (mlir::Value value = getScalarRValValueOrNull(rvalue)) {
    if (!shouldCastToInt(value.getType(), cmpxchg))
      return cgf.emitToMemory(value, valueTy);

    cgf.cgm.errorNYI(
        loc, "AtomicInfo::convertRValueToInt: cast scalar rvalue to int");
    return nullptr;
  }

  cgf.cgm.errorNYI(
      loc, "AtomicInfo::convertRValueToInt: cast non-scalar rvalue to int");
  return nullptr;
}

/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
  assert(lvalue.isSimple());

  // If we have an r-value, the rvalue should be of the atomic type,
  // which means that the caller is responsible for having zeroed
  // any padding. Just do an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    cgf.cgm.errorNYI("copying aggregate into atomic lvalue");
    return;
  }

  // Okay, otherwise we're copying stuff.

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary();

  // Drill past the padding if present.
  LValue tempLValue = projectValue();

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    cgf.emitStoreOfScalar(rvalue.getValue(), tempLValue, /*isInit=*/true);
  } else {
    cgf.cgm.errorNYI("copying complex into atomic lvalue");
  }
}

// Create a "default" case for an atomic memory-order switch and set the
// insertion point inside its region.
static void emitMemOrderDefaultCaseLabel(CIRGenBuilderTy &builder,
                                         mlir::Location loc) {
  mlir::ArrayAttr ordersAttr = builder.getArrayAttr({});
  mlir::OpBuilder::InsertPoint insertPoint;
  cir::CaseOp::create(builder, loc, ordersAttr, cir::CaseOpKind::Default,
                      insertPoint);
  builder.restoreInsertionPoint(insertPoint);
}

// Create a "case" operation with the given list of orders as its values. Also
// create the region that will hold the body of the switch-case label.
static void emitMemOrderCaseLabel(CIRGenBuilderTy &builder, mlir::Location loc,
                                  mlir::Type orderType,
                                  llvm::ArrayRef<cir::MemOrder> orders) {
  llvm::SmallVector<mlir::Attribute> orderAttrs;
  for (cir::MemOrder order : orders)
    orderAttrs.push_back(cir::IntAttr::get(orderType, static_cast<int>(order)));
  mlir::ArrayAttr ordersAttr = builder.getArrayAttr(orderAttrs);

  mlir::OpBuilder::InsertPoint insertPoint;
  cir::CaseOp::create(builder, loc, ordersAttr, cir::CaseOpKind::Anyof,
                      insertPoint);
  builder.restoreInsertionPoint(insertPoint);
}

static void emitAtomicCmpXchg(CIRGenFunction &cgf, AtomicExpr *e, bool isWeak,
                              Address dest, Address ptr, Address val1,
                              Address val2, uint64_t size,
                              cir::MemOrder successOrder,
                              cir::MemOrder failureOrder) {
  mlir::Location loc = cgf.getLoc(e->getSourceRange());

  CIRGenBuilderTy &builder = cgf.getBuilder();
  mlir::Value expected = builder.createLoad(loc, val1);
  mlir::Value desired = builder.createLoad(loc, val2);

  auto cmpxchg = cir::AtomicCmpXchgOp::create(
      builder, loc, expected.getType(), builder.getBoolTy(), ptr.getPointer(),
      expected, desired,
      cir::MemOrderAttr::get(&cgf.getMLIRContext(), successOrder),
      cir::MemOrderAttr::get(&cgf.getMLIRContext(), failureOrder),
      builder.getI64IntegerAttr(ptr.getAlignment().getAsAlign().value()));

  cmpxchg.setIsVolatile(e->isVolatile());
  cmpxchg.setWeak(isWeak);

  mlir::Value failed = builder.createNot(cmpxchg.getSuccess());
  cir::IfOp::create(builder, loc, failed, /*withElseRegion=*/false,
                    [&](mlir::OpBuilder &, mlir::Location) {
                      auto ptrTy = mlir::cast<cir::PointerType>(
                          val1.getPointer().getType());
                      if (val1.getElementType() != ptrTy.getPointee()) {
                        val1 = val1.withPointer(builder.createPtrBitcast(
                            val1.getPointer(), val1.getElementType()));
                      }
                      builder.createStore(loc, cmpxchg.getOld(), val1);
                      builder.createYield(loc);
                    });

  // Update the memory at Dest with Success's value.
  cgf.emitStoreOfScalar(cmpxchg.getSuccess(),
                        cgf.makeAddrLValue(dest, e->getType()),
                        /*isInit=*/false);
}
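// Illustrative sketch (not part of the upstream file) of the sequence built
// above for a call like
//   bool ok = __atomic_compare_exchange_n(p, &expected, desired, /*weak=*/0,
//                                         __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
// the cir::AtomicCmpXchgOp yields the old value plus a success flag; on
// failure the old value is written back through the 'expected' slot (val1),
// and the success flag is stored to the result slot (dest).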

static void emitAtomicCmpXchgFailureSet(CIRGenFunction &cgf, AtomicExpr *e,
                                        bool isWeak, Address dest, Address ptr,
                                        Address val1, Address val2,
                                        Expr *failureOrderExpr, uint64_t size,
                                        cir::MemOrder successOrder) {
  Expr::EvalResult failureOrderEval;
  if (failureOrderExpr->EvaluateAsInt(failureOrderEval, cgf.getContext())) {
    uint64_t failureOrderInt = failureOrderEval.Val.getInt().getZExtValue();

    cir::MemOrder failureOrder;
    if (!cir::isValidCIRAtomicOrderingCABI(failureOrderInt)) {
      failureOrder = cir::MemOrder::Relaxed;
    } else {
      switch ((cir::MemOrder)failureOrderInt) {
      case cir::MemOrder::Relaxed:
        // 31.7.2.18: "The failure argument shall not be memory_order_release
        // nor memory_order_acq_rel". Fall back to monotonic.
      case cir::MemOrder::Release:
      case cir::MemOrder::AcquireRelease:
        failureOrder = cir::MemOrder::Relaxed;
        break;
      case cir::MemOrder::Consume:
      case cir::MemOrder::Acquire:
        failureOrder = cir::MemOrder::Acquire;
        break;
      case cir::MemOrder::SequentiallyConsistent:
        failureOrder = cir::MemOrder::SequentiallyConsistent;
        break;
      }
    }

    // Prior to C++17, "the failure argument shall be no stronger than the
    // success argument". This condition has been lifted and the only
    // precondition is 31.7.2.18. Effectively treat this as a DR and skip
    // language version checks.
    emitAtomicCmpXchg(cgf, e, isWeak, dest, ptr, val1, val2, size, successOrder,
                      failureOrder);
    return;
  }

  cgf.cgm.errorNYI(e->getSourceRange(),
                   "emitAtomicCmpXchgFailureSet: non-constant failure order");
}
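// Illustrative example (not part of the upstream file): for
//   __c11_atomic_compare_exchange_strong(p, &e, d, memory_order_acq_rel,
//                                        memory_order_release);
// the constant failure order (release) is disallowed by the rule quoted
// above, so it is clamped to relaxed before emitAtomicCmpXchg is called;
// consume and acquire become acquire, and seq_cst is kept as seq_cst.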

static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
                         Address ptr, Address val1, Address val2,
                         Expr *isWeakExpr, Expr *failureOrderExpr, int64_t size,
                         cir::MemOrder order, cir::SyncScopeKind scope) {
  llvm::StringRef opName;

  CIRGenBuilderTy &builder = cgf.getBuilder();
  mlir::Location loc = cgf.getLoc(expr->getSourceRange());
  auto orderAttr = cir::MemOrderAttr::get(builder.getContext(), order);
  auto scopeAttr = cir::SyncScopeKindAttr::get(builder.getContext(), scope);
  cir::AtomicFetchKindAttr fetchAttr;
  bool fetchFirst = true;

  switch (expr->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    emitAtomicCmpXchgFailureSet(cgf, expr, /*isWeak=*/false, dest, ptr, val1,
                                val2, failureOrderExpr, size, order);
    return;

  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    emitAtomicCmpXchgFailureSet(cgf, expr, /*isWeak=*/true, dest, ptr, val1,
                                val2, failureOrderExpr, size, order);
    return;

  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    bool isWeak = false;
    if (isWeakExpr->EvaluateAsBooleanCondition(isWeak, cgf.getContext())) {
      emitAtomicCmpXchgFailureSet(cgf, expr, isWeak, dest, ptr, val1, val2,
                                  failureOrderExpr, size, order);
    } else {
      cgf.cgm.errorNYI(expr->getSourceRange(),
                       "emitAtomicOp: non-constant isWeak");
    }
    return;
  }

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load:
  case AtomicExpr::AO__scoped_atomic_load_n:
  case AtomicExpr::AO__scoped_atomic_load: {
    cir::LoadOp load =
        builder.createLoad(loc, ptr, /*isVolatile=*/expr->isVolatile());

    load->setAttr("mem_order", orderAttr);
    load->setAttr("sync_scope", scopeAttr);

    builder.createStore(loc, load->getResult(0), dest);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__scoped_atomic_store:
  case AtomicExpr::AO__scoped_atomic_store_n: {
    cir::LoadOp loadVal1 = builder.createLoad(loc, val1);

    builder.createStore(loc, loadVal1, ptr, expr->isVolatile(),
                        /*align=*/mlir::IntegerAttr{}, scopeAttr, orderAttr);
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    opName = cir::AtomicXchgOp::getOperationName();
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Add);
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Sub);
    break;

  case AtomicExpr::AO__atomic_min_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_min:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Min);
    break;

  case AtomicExpr::AO__atomic_max_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_max:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Max);
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::And);
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Or);
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Xor);
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__atomic_fetch_nand:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Nand);
    break;

  case AtomicExpr::AO__atomic_test_and_set: {
    auto op = cir::AtomicTestAndSetOp::create(
        builder, loc, ptr.getPointer(), order,
        builder.getI64IntegerAttr(ptr.getAlignment().getQuantity()),
        expr->isVolatile());
    builder.createStore(loc, op, dest);
    return;
  }

  case AtomicExpr::AO__atomic_clear: {
    cir::AtomicClearOp::create(
        builder, loc, ptr.getPointer(), order,
        builder.getI64IntegerAttr(ptr.getAlignment().getQuantity()),
        expr->isVolatile());
    return;
  }

  case AtomicExpr::AO__opencl_atomic_init:

  case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:

  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__hip_atomic_compare_exchange_weak:

  case AtomicExpr::AO__scoped_atomic_compare_exchange:
  case AtomicExpr::AO__scoped_atomic_compare_exchange_n:

  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__hip_atomic_load:

  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__hip_atomic_store:

  case AtomicExpr::AO__hip_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__scoped_atomic_exchange_n:
  case AtomicExpr::AO__scoped_atomic_exchange:

  case AtomicExpr::AO__scoped_atomic_add_fetch:

  case AtomicExpr::AO__hip_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__scoped_atomic_fetch_add:

  case AtomicExpr::AO__scoped_atomic_sub_fetch:

  case AtomicExpr::AO__hip_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
  case AtomicExpr::AO__scoped_atomic_fetch_sub:

  case AtomicExpr::AO__scoped_atomic_min_fetch:

  case AtomicExpr::AO__hip_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__scoped_atomic_fetch_min:

  case AtomicExpr::AO__scoped_atomic_max_fetch:

  case AtomicExpr::AO__hip_atomic_fetch_max:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__scoped_atomic_fetch_max:

  case AtomicExpr::AO__scoped_atomic_and_fetch:

  case AtomicExpr::AO__hip_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__scoped_atomic_fetch_and:

  case AtomicExpr::AO__scoped_atomic_or_fetch:

  case AtomicExpr::AO__hip_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__scoped_atomic_fetch_or:

  case AtomicExpr::AO__scoped_atomic_xor_fetch:

  case AtomicExpr::AO__hip_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__scoped_atomic_fetch_xor:

  case AtomicExpr::AO__scoped_atomic_nand_fetch:

  case AtomicExpr::AO__scoped_atomic_fetch_nand:

  case AtomicExpr::AO__scoped_atomic_uinc_wrap:
  case AtomicExpr::AO__scoped_atomic_udec_wrap:
    cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: expr op NYI");
    return;
  }

  assert(!opName.empty() && "expected operation name to build");
  mlir::Value loadVal1 = builder.createLoad(loc, val1);

  SmallVector<mlir::Value> atomicOperands = {ptr.getPointer(), loadVal1};
  SmallVector<mlir::Type> atomicResTys = {loadVal1.getType()};
  mlir::Operation *rmwOp = builder.create(loc, builder.getStringAttr(opName),
                                          atomicOperands, atomicResTys);

  if (fetchAttr)
    rmwOp->setAttr("binop", fetchAttr);
  rmwOp->setAttr("mem_order", orderAttr);
  if (expr->isVolatile())
    rmwOp->setAttr("is_volatile", builder.getUnitAttr());
  if (fetchFirst && opName == cir::AtomicFetchOp::getOperationName())
    rmwOp->setAttr("fetch_first", builder.getUnitAttr());

  mlir::Value result = rmwOp->getResult(0);
  builder.createStore(loc, result, dest);
}
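// Illustrative mapping (not part of the upstream file): __atomic_fetch_add
// yields a cir::AtomicFetchOp with binop=Add and the fetch_first attribute
// set, so the old value is produced, while __atomic_add_fetch emits the same
// op without fetch_first and produces the updated value; __atomic_exchange_n
// maps to cir::AtomicXchgOp.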

// Map clang sync scope to CIR sync scope.
static cir::SyncScopeKind convertSyncScopeToCIR(CIRGenFunction &cgf,
                                                SourceRange range,
                                                clang::SyncScope scope) {
  switch (scope) {
  default: {
    cgf.cgm.errorNYI(range, "convertSyncScopeToCIR: unhandled sync scope");
    return cir::SyncScopeKind::System;
  }

  case clang::SyncScope::SingleScope:
    return cir::SyncScopeKind::SingleThread;
  case clang::SyncScope::SystemScope:
    return cir::SyncScopeKind::System;
  }
}

static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
                         Address ptr, Address val1, Address val2,
                         Expr *isWeakExpr, Expr *failureOrderExpr, int64_t size,
                         cir::MemOrder order,
                         const std::optional<Expr::EvalResult> &scopeConst,
                         mlir::Value scopeValue) {
  std::unique_ptr<AtomicScopeModel> scopeModel = expr->getScopeModel();

  if (!scopeModel) {
    emitAtomicOp(cgf, expr, dest, ptr, val1, val2, isWeakExpr, failureOrderExpr,
                 size, order, cir::SyncScopeKind::System);
    return;
  }

  if (scopeConst.has_value()) {
    cir::SyncScopeKind mappedScope = convertSyncScopeToCIR(
        cgf, expr->getScope()->getSourceRange(),
        scopeModel->map(scopeConst->Val.getInt().getZExtValue()));
    emitAtomicOp(cgf, expr, dest, ptr, val1, val2, isWeakExpr, failureOrderExpr,
                 size, order, mappedScope);
    return;
  }

  cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: dynamic sync scope");
}

static bool isMemOrderValid(uint64_t order, bool isStore, bool isLoad) {
  if (!cir::isValidCIRAtomicOrderingCABI(order))
    return false;
  auto memOrder = static_cast<cir::MemOrder>(order);
  if (isStore)
    return memOrder != cir::MemOrder::Consume &&
           memOrder != cir::MemOrder::Acquire &&
           memOrder != cir::MemOrder::AcquireRelease;
  if (isLoad)
    return memOrder != cir::MemOrder::Release &&
           memOrder != cir::MemOrder::AcquireRelease;
  return true;
}
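// Illustrative example (not part of the upstream file): a constant-folded
// memory_order_acquire on __c11_atomic_store fails this check, so the caller
// in emitAtomicExpr simply emits no operation for it; likewise a load with
// memory_order_release is rejected.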

static void emitAtomicExprWithDynamicMemOrder(
    CIRGenFunction &cgf, mlir::Value order, AtomicExpr *e, Address dest,
    Address ptr, Address val1, Address val2, Expr *isWeakExpr,
    Expr *orderFailExpr, uint64_t size, bool isStore, bool isLoad,
    const std::optional<Expr::EvalResult> &scopeConst, mlir::Value scopeValue) {
  // The memory order is not known at compile-time. The atomic operations
  // can't handle runtime memory orders; the memory order must be hard coded.
  // Generate a "switch" statement that converts a runtime value into a
  // compile-time value.
  CIRGenBuilderTy &builder = cgf.getBuilder();
  cir::SwitchOp::create(
      builder, order.getLoc(), order,
      [&](mlir::OpBuilder &, mlir::Location loc, mlir::OperationState &) {
        mlir::Block *switchBlock = builder.getBlock();

        auto emitMemOrderCase = [&](llvm::ArrayRef<cir::MemOrder> caseOrders,
                                    cir::MemOrder actualOrder) {
          if (caseOrders.empty())
            emitMemOrderDefaultCaseLabel(builder, loc);
          else
            emitMemOrderCaseLabel(builder, loc, order.getType(), caseOrders);
          emitAtomicOp(cgf, e, dest, ptr, val1, val2, isWeakExpr, orderFailExpr,
                       size, actualOrder, scopeConst, scopeValue);
          builder.createBreak(loc);
          builder.setInsertionPointToEnd(switchBlock);
        };

        // default:
        // Use memory_order_relaxed for relaxed operations and for any memory
        // order value that is not supported. There is no good way to report
        // an unsupported memory order at runtime, hence the fallback to
        // memory_order_relaxed.
        emitMemOrderCase(/*caseOrders=*/{}, cir::MemOrder::Relaxed);

        if (!isStore) {
          // case consume:
          // case acquire:
          // memory_order_consume is not implemented; it is always treated
          // like memory_order_acquire. These memory orders are not valid for
          // write-only operations.
          emitMemOrderCase({cir::MemOrder::Consume, cir::MemOrder::Acquire},
                           cir::MemOrder::Acquire);
        }

        if (!isLoad) {
          // case release:
          // memory_order_release is not valid for read-only operations.
          emitMemOrderCase({cir::MemOrder::Release}, cir::MemOrder::Release);
        }

        if (!isLoad && !isStore) {
          // case acq_rel:
          // memory_order_acq_rel is only valid for read-write operations.
          emitMemOrderCase({cir::MemOrder::AcquireRelease},
                           cir::MemOrder::AcquireRelease);
        }

        // case seq_cst:
        emitMemOrderCase({cir::MemOrder::SequentiallyConsistent},
                         cir::MemOrder::SequentiallyConsistent);

        builder.createYield(loc);
      });
}
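// Illustrative example (not part of the upstream file): for
//   int v = __atomic_load_n(p, runtime_order);
// the order is not a compile-time constant, so the helper above builds a
// cir::SwitchOp over runtime_order with a default (relaxed) region, a
// consume/acquire region, and a seq_cst region; release and acq_rel regions
// are omitted because they are invalid for a pure load.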

RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
  QualType atomicTy = e->getPtr()->getType()->getPointeeType();
  QualType memTy = atomicTy;
  if (const auto *ty = atomicTy->getAs<AtomicType>())
    memTy = ty->getValueType();

  Expr *isWeakExpr = nullptr;
  Expr *orderFailExpr = nullptr;

  Address val1 = Address::invalid();
  Address val2 = Address::invalid();
  Address dest = Address::invalid();
  Address ptr = emitPointerWithAlignment(e->getPtr());

  if (e->getOp() == AtomicExpr::AO__c11_atomic_init) {
    LValue lvalue = makeAddrLValue(ptr, atomicTy);
    emitAtomicInit(e->getVal1(), lvalue);
    return RValue::get(nullptr);
  }

  TypeInfoChars typeInfo = getContext().getTypeInfoInChars(atomicTy);
  uint64_t size = typeInfo.Width.getQuantity();

  // Emit the memory order operand, and try to evaluate it as a constant.
  mlir::Value order = emitScalarExpr(e->getOrder());
  std::optional<Expr::EvalResult> orderConst;
  if (Expr::EvalResult eval; e->getOrder()->EvaluateAsInt(eval, getContext()))
    orderConst.emplace(std::move(eval));
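  // Illustrative note (not part of the upstream file): for
  // __c11_atomic_load(&x, memory_order_acquire) the order operand folds to
  // the constant 2 (the C ABI value of memory_order_acquire) and orderConst
  // is populated; with a runtime order variable, orderConst stays empty and
  // the dynamic-order path near the end of this function is taken.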

  // Emit the sync scope operand, and try to evaluate it as a constant.
  mlir::Value scope =
      e->getScopeModel() ? emitScalarExpr(e->getScope()) : nullptr;
  std::optional<Expr::EvalResult> scopeConst;
  if (Expr::EvalResult eval;
      e->getScopeModel() && e->getScope()->EvaluateAsInt(eval, getContext()))
    scopeConst.emplace(std::move(eval));

  bool shouldCastToIntPtrTy = true;

  switch (e->getOp()) {
  default:
    cgm.errorNYI(e->getSourceRange(), "atomic op NYI");
    return RValue::get(nullptr);

  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("already handled above with emitAtomicInit");

  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__scoped_atomic_load_n:
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_test_and_set:
  case AtomicExpr::AO__atomic_clear:
    break;

  case AtomicExpr::AO__atomic_load:
  case AtomicExpr::AO__scoped_atomic_load:
    dest = emitPointerWithAlignment(e->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__scoped_atomic_store:
    val1 = emitPointerWithAlignment(e->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    val1 = emitPointerWithAlignment(e->getVal1());
    dest = emitPointerWithAlignment(e->getVal2());
    break;

  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    val1 = emitPointerWithAlignment(e->getVal1());
    if (e->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
        e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
      val2 = emitPointerWithAlignment(e->getVal2());
    else
      val2 = emitValToTemp(*this, e->getVal2());
    orderFailExpr = e->getOrderFail();
    if (e->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
        e->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
        e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange_n ||
        e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
      isWeakExpr = e->getWeak();
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (memTy->isPointerType()) {
      cgm.errorNYI(e->getSourceRange(),
                   "atomic fetch-and-add and fetch-and-sub for pointers");
      return RValue::get(nullptr);
    }
    [[fallthrough]];
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__c11_atomic_fetch_min:
    shouldCastToIntPtrTy = !memTy->isFloatingType();
    [[fallthrough]];

  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__scoped_atomic_store_n:
    val1 = emitValToTemp(*this, e->getVal1());
    break;
  }

  QualType resultTy = e->getType().getUnqualifiedType();

  // The inlined atomics only function on iN types, where N is a power of 2. We
  // need to make sure (via temporaries if necessary) that all incoming values
  // are compatible.
  LValue atomicValue = makeAddrLValue(ptr, atomicTy);
  AtomicInfo atomics(*this, atomicValue, getLoc(e->getSourceRange()));

  if (shouldCastToIntPtrTy) {
    ptr = atomics.castToAtomicIntPointer(ptr);
    if (val1.isValid())
      val1 = atomics.convertToAtomicIntPointer(val1);
  }
  if (dest.isValid()) {
    if (shouldCastToIntPtrTy)
      dest = atomics.castToAtomicIntPointer(dest);
  } else if (e->isCmpXChg()) {
    dest = createMemTemp(resultTy, getLoc(e->getSourceRange()), "cmpxchg.bool");
  } else if (e->getOp() == AtomicExpr::AO__atomic_test_and_set) {
    dest = createMemTemp(resultTy, getLoc(e->getSourceRange()),
                         "test_and_set.bool");
  } else if (!resultTy->isVoidType()) {
    dest = atomics.createTempAlloca();
    if (shouldCastToIntPtrTy)
      dest = atomics.castToAtomicIntPointer(dest);
  }

  bool powerOf2Size = (size & (size - 1)) == 0;
  bool useLibCall = !powerOf2Size || (size > 16);

  // For atomics larger than 16 bytes, emit a libcall from the frontend. This
  // avoids the overhead of dealing with excessively-large value types in IR.
  // Non-power-of-2 values also lower to libcall here, as they are not currently
  // permitted in IR instructions (although that constraint could be relaxed in
  // the future). For other cases where a libcall is required on a given
  // platform, we let the backend handle it (this includes handling for all of
  // the size-optimized libcall variants, which are only valid up to 16 bytes.)
  //
  // See: https://llvm.org/docs/Atomics.html#libcalls-atomic
  if (useLibCall) {
    cgm.errorNYI(e->getSourceRange(), "emitAtomicExpr: emit atomic lib call");
    return RValue::get(nullptr);
  }
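  // Illustrative arithmetic (not part of the upstream file): a 16-byte atomic
  // (a power of 2, not larger than 16) is emitted inline, while a 24-byte one
  // (24 & 23 == 16, so not a power of 2) or a 32-byte one (larger than 16)
  // takes the libcall path above, which is not yet implemented.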

  bool isStore = e->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 e->getOp() == AtomicExpr::AO__opencl_atomic_store ||
                 e->getOp() == AtomicExpr::AO__hip_atomic_store ||
                 e->getOp() == AtomicExpr::AO__atomic_store ||
                 e->getOp() == AtomicExpr::AO__atomic_store_n ||
                 e->getOp() == AtomicExpr::AO__scoped_atomic_store ||
                 e->getOp() == AtomicExpr::AO__scoped_atomic_store_n ||
                 e->getOp() == AtomicExpr::AO__atomic_clear;
  bool isLoad = e->getOp() == AtomicExpr::AO__c11_atomic_load ||
                e->getOp() == AtomicExpr::AO__opencl_atomic_load ||
                e->getOp() == AtomicExpr::AO__hip_atomic_load ||
                e->getOp() == AtomicExpr::AO__atomic_load ||
                e->getOp() == AtomicExpr::AO__atomic_load_n ||
                e->getOp() == AtomicExpr::AO__scoped_atomic_load ||
                e->getOp() == AtomicExpr::AO__scoped_atomic_load_n;

  if (orderConst.has_value()) {
    // We have evaluated the memory order as an integer constant in orderConst.
    // We should not ever get to a case where the ordering isn't a valid CABI
    // value, but it's hard to enforce that in general.
    uint64_t ord = orderConst->Val.getInt().getZExtValue();
    if (isMemOrderValid(ord, isStore, isLoad))
      emitAtomicOp(*this, e, dest, ptr, val1, val2, isWeakExpr, orderFailExpr,
                   size, static_cast<cir::MemOrder>(ord), scopeConst, scope);
  } else {
    emitAtomicExprWithDynamicMemOrder(*this, order, e, dest, ptr, val1, val2,
                                      isWeakExpr, orderFailExpr, size, isStore,
                                      isLoad, scopeConst, scope);
  }

  if (resultTy->isVoidType())
    return RValue::get(nullptr);

  return convertTempToRValue(
      dest.withElementType(builder, convertTypeForMem(resultTy)), resultTy,
      e->getExprLoc());
}

void CIRGenFunction::emitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
  bool isVolatile = dest.isVolatileQualified();
  auto order = cir::MemOrder::SequentiallyConsistent;
  if (!dest.getType()->isAtomicType()) {
  }
  return emitAtomicStore(rvalue, dest, order, isVolatile, isInit);
}

/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value of the atomic type; this
/// means that for aggregate r-values, it should include storage for any padding
/// that was necessary.
void CIRGenFunction::emitAtomicStore(RValue rvalue, LValue dest,
                                     cir::MemOrder order, bool isVolatile,
                                     bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  mlir::Location loc = dest.getPointer().getLoc();
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddress().getElementType() ==
             dest.getAddress().getElementType());

  AtomicInfo atomics(*this, dest, loc);
  LValue lvalue = atomics.getAtomicLValue();

  if (lvalue.isSimple()) {
    // If this is an initialization, just put the value there normally.
    if (isInit) {
      atomics.emitCopyIntoMemory(rvalue);
      return;
    }

    // Check whether we should use a library call.
    if (atomics.shouldUseLibCall()) {
      cgm.errorNYI(loc, "emitAtomicStore: atomic store with library call");
      return;
    }

    // Okay, we're doing this natively.
    mlir::Value valueToStore = atomics.convertRValueToInt(rvalue);

    // Do the atomic store.
    Address addr = atomics.getAtomicAddress();
    if (mlir::Value value = atomics.getScalarRValValueOrNull(rvalue)) {
      if (shouldCastToInt(value.getType(), /*CmpXchg=*/false)) {
        addr = atomics.castToAtomicIntPointer(addr);
        valueToStore =
            builder.createIntCast(valueToStore, addr.getElementType());
      }
    }
    cir::StoreOp store = builder.createStore(loc, valueToStore, addr);

    // Initializations don't need to be atomic.
    if (!isInit) {
      store.setMemOrder(order);
    }

    // Other decoration.
    if (isVolatile)
      store.setIsVolatile(true);

    return;
  }

  cgm.errorNYI(loc, "emitAtomicStore: non-simple atomic lvalue");
}

void CIRGenFunction::emitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest, getLoc(init->getSourceRange()));

  switch (atomics.getEvaluationKind()) {
  case cir::TEK_Scalar: {
    mlir::Value value = emitScalarExpr(init);
    atomics.emitCopyIntoMemory(RValue::get(value));
    return;
  }

  case cir::TEK_Complex: {
    mlir::Value value = emitComplexExpr(init);
    atomics.emitCopyIntoMemory(RValue::get(value));
    return;
  }

  case cir::TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression
    // of atomic type.
    bool zeroed = false;
    if (!init->getType()->isAtomicType()) {
      zeroed = atomics.emitMemSetZeroIfNecessary();
      dest = atomics.projectValue();
    }

    // Evaluate the expression directly into the destination.
    AggValueSlot slot = AggValueSlot::forLValue(
        dest, AggValueSlot::IsNotDestructed, AggValueSlot::IsNotAliased,
        AggValueSlot::DoesNotOverlap,
        zeroed ? AggValueSlot::IsZeroed : AggValueSlot::IsNotZeroed);

    emitAggExpr(init, slot);
    return;
  }
  }

  llvm_unreachable("bad evaluation kind");
}