namespace {
class AtomicInfo {
  CIRGenFunction &cgf;
  QualType atomicTy;
  QualType valueTy;
  uint64_t atomicSizeInBits = 0;
  uint64_t valueSizeInBits = 0;
  CharUnits atomicAlign;
  cir::TypeEvaluationKind evaluationKind = cir::TEK_Scalar;
  bool useLibCall = true;
  LValue lvalue;
  mlir::Location loc;

public:
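  // Gather everything needed to lower operations on an atomic lvalue: the
  // value type wrapped by _Atomic, the sizes and alignments of both the value
  // and the full atomic object, the evaluation kind (scalar/complex/
  // aggregate), and whether the target lacks a lock-free instruction at this
  // width and alignment so a library call would be required.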
  AtomicInfo(CIRGenFunction &cgf, LValue &lvalue, mlir::Location loc)
      : cgf(cgf), loc(loc) {
    assert(!lvalue.isGlobalReg());
    ASTContext &ctx = cgf.getContext();
    if (lvalue.isSimple()) {
      atomicTy = lvalue.getType();
      if (auto *ty = atomicTy->getAs<AtomicType>())
        valueTy = ty->getValueType();
      else
        valueTy = atomicTy;
      evaluationKind = cgf.getEvaluationKind(valueTy);

      TypeInfo valueTypeInfo = ctx.getTypeInfo(valueTy);
      TypeInfo atomicTypeInfo = ctx.getTypeInfo(atomicTy);
      uint64_t valueAlignInBits = valueTypeInfo.Align;
      uint64_t atomicAlignInBits = atomicTypeInfo.Align;
      valueSizeInBits = valueTypeInfo.Width;
      atomicSizeInBits = atomicTypeInfo.Width;
      assert(valueSizeInBits <= atomicSizeInBits);
      assert(valueAlignInBits <= atomicAlignInBits);

      atomicAlign = ctx.toCharUnitsFromBits(atomicAlignInBits);
      if (lvalue.getAlignment().isZero())
        lvalue.setAlignment(atomicAlign);

      this->lvalue = lvalue;
    } else {
      cgf.cgm.errorNYI(loc, "AtomicInfo: non-simple lvalue");
    }
    useLibCall = !ctx.getTargetInfo().hasBuiltinAtomic(
        atomicSizeInBits, ctx.toBits(lvalue.getAlignment()));
  }
  QualType getValueType() const { return valueTy; }
  CharUnits getAtomicAlignment() const { return atomicAlign; }
  cir::TypeEvaluationKind getEvaluationKind() const { return evaluationKind; }
  mlir::Value getAtomicPointer() const {
    if (lvalue.isSimple())
      return lvalue.getPointer();
    assert(!cir::MissingFeatures::atomicInfoGetAtomicPointer());
    return nullptr;
  }
  bool shouldUseLibCall() const { return useLibCall; }
  const LValue &getAtomicLValue() const { return lvalue; }
  Address getAtomicAddress() const {
    mlir::Type elemTy;
    if (lvalue.isSimple()) {
      elemTy = lvalue.getAddress().getElementType();
    } else {
      assert(!cir::MissingFeatures::atomicInfoGetAtomicAddress());
      cgf.cgm.errorNYI(loc, "AtomicInfo::getAtomicAddress: non-simple lvalue");
    }
    return Address(getAtomicPointer(), elemTy, getAtomicAlignment());
  }

  bool hasPadding() const { return (valueSizeInBits != atomicSizeInBits); }
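  // The helpers declared below are the workhorses of the lowering: zero the
  // padding of the atomic buffer, recover a scalar from an RValue, reinterpret
  // addresses as the iN type used by the atomic ops, convert an RValue into a
  // value that can be stored atomically, copy an RValue into the atomic's
  // memory, and project the value lvalue out of the atomic one.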
  bool emitMemSetZeroIfNecessary() const;

  mlir::Value getScalarRValValueOrNull(RValue rvalue) const;

  /// Cast the given pointer to an integer pointer suitable for atomic
  /// operations on the atomic type.
  Address castToAtomicIntPointer(Address addr) const;

  /// If addr is compatible with the iN that will be used for an atomic
  /// operation, bitcast it. Otherwise, create a temporary that is suitable
  /// and copy the value across.
  Address convertToAtomicIntPointer(Address addr) const;

  mlir::Value convertRValueToInt(RValue rvalue, bool cmpxchg = false) const;

  void emitCopyIntoMemory(RValue rvalue) const;

  /// Project an lvalue down to the value field.
  LValue projectValue() const {
    assert(lvalue.isSimple());
    Address addr = getAtomicAddress();
    if (hasPadding())
      cgf.cgm.errorNYI(loc, "AtomicInfo::projectValue: padding");
    return LValue::makeAddr(addr, getValueType(), lvalue.getBaseInfo());
  }

  /// Creates temp alloca for intermediate operations on atomic value.
  Address createTempAlloca() const;

private:
  bool requiresMemSetZero(mlir::Type ty) const;
};
} // namespace

/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CIRGenModule &cgm, mlir::Type ty,
                           uint64_t expectedSize) {
  return cgm.getDataLayout().getTypeStoreSize(ty) * 8 == expectedSize;
}
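// Return true if a store of the value may leave bits of the atomic object
// unwritten (padding, or a type narrower than the atomic width), in which
// case the buffer must be zeroed before the store so the unused bits have a
// defined value.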
bool AtomicInfo::requiresMemSetZero(mlir::Type ty) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding())
    return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the type uses
  // the full size.
  case cir::TEK_Scalar:
    return !isFullSizeType(cgf.cgm, ty, atomicSizeInBits);
  case cir::TEK_Complex:
    return !isFullSizeType(cgf.cgm,
                           mlir::cast<cir::ComplexType>(ty).getElementType(),
                           atomicSizeInBits / 2);
  // Padding in structs has an undefined bit pattern.
  case cir::TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}
Address AtomicInfo::convertToAtomicIntPointer(Address addr) const {
  mlir::Type ty = addr.getElementType();
  uint64_t sourceSizeInBits = cgf.cgm.getDataLayout().getTypeSizeInBits(ty);
  if (sourceSizeInBits != atomicSizeInBits) {
    cgf.cgm.errorNYI(
        loc,
        "AtomicInfo::convertToAtomicIntPointer: convert through temp alloca");
  }
  return castToAtomicIntPointer(addr);
}
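// Allocate a stack temporary wide enough for intermediate atomic results.
// Oversized bitfield lvalues would need the value type instead; that path is
// not implemented yet (see the errorNYI below).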
Address AtomicInfo::createTempAlloca() const {
  Address tempAlloca = cgf.createMemTemp(
      (lvalue.isBitField() && valueSizeInBits > atomicSizeInBits) ? valueTy
                                                                  : atomicTy,
      getAtomicAlignment(), loc, "atomic-temp");

  // Cast to pointer to value type for bitfields.
  if (lvalue.isBitField()) {
    cgf.cgm.errorNYI(loc, "AtomicInfo::createTempAlloca: bitfield lvalue");
  }
  return tempAlloca;
}
mlir::Value AtomicInfo::getScalarRValValueOrNull(RValue rvalue) const {
  // A scalar rvalue can be used directly unless the atomic object carries
  // padding that the operation would need to see through.
  if (rvalue.isScalar() && (!hasPadding() || !lvalue.isSimple()))
    return rvalue.getValue();
  return nullptr;
}
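// Reinterpret the address as a pointer to an unsigned integer of the atomic
// width, so the operation can act on the full object representation.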
Address AtomicInfo::castToAtomicIntPointer(Address addr) const {
  auto intTy = mlir::dyn_cast<cir::IntType>(addr.getElementType());
  // Don't bother with int casts if the integer size is the same.
  if (intTy && intTy.getWidth() == atomicSizeInBits)
    return addr;
  auto ty = cgf.getBuilder().getUIntNTy(atomicSizeInBits);
  return addr.withElementType(cgf.getBuilder(), ty);
}
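// Zero the atomic buffer when a following store would not overwrite the full
// width. Returns true when a memset was emitted; the memset itself is still
// NYI in this lowering.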
bool AtomicInfo::emitMemSetZeroIfNecessary() const {
  assert(lvalue.isSimple());
  Address addr = lvalue.getAddress();
  if (!requiresMemSetZero(addr.getElementType()))
    return false;

  cgf.cgm.errorNYI(loc,
                   "AtomicInfo::emitMemSetZeroIfNecessary: emit memset zero");
  return false;
}
/// Return true if a value of the given type must be cast to an integer of the
/// atomic width before it can be used in an atomic operation.
static bool shouldCastToInt(mlir::Type valueTy, bool cmpxchg) {
  if (cir::isAnyFloatingPointType(valueTy))
    return mlir::isa<cir::FP80Type>(valueTy) || cmpxchg;
  return !mlir::isa<cir::IntType>(valueTy) &&
         !mlir::isa<cir::PointerType>(valueTy);
}
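// Materialize an rvalue as something an atomic instruction can store: scalars
// of int/pointer/FP type pass through (after the to-memory conversion);
// everything else needs the cast-through-memory path, which is NYI below.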
mlir::Value AtomicInfo::convertRValueToInt(RValue rvalue, bool cmpxchg) const {
  // If we've got a scalar value of the right size, try to avoid going through
  // memory.
  if (mlir::Value value = getScalarRValValueOrNull(rvalue)) {
    if (!shouldCastToInt(value.getType(), cmpxchg))
      return cgf.emitToMemory(value, valueTy);
    cgf.cgm.errorNYI(
        loc, "AtomicInfo::convertRValueToInt: cast scalar rvalue to int");
    return nullptr;
  }

  cgf.cgm.errorNYI(
      loc, "AtomicInfo::convertRValueToInt: cast non-scalar rvalue to int");
  return nullptr;
}
void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
  assert(lvalue.isSimple());

  // If we have an aggregate r-value, it should be of the atomic type, which
  // means the caller is responsible for having zeroed any padding.
  if (rvalue.isAggregate()) {
    cgf.cgm.errorNYI("copying aggregate into atomic lvalue");
    return;
  }

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary();

  // Drill past the padding if present.
  LValue tempLValue = projectValue();

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    cgf.emitStoreOfScalar(rvalue.getValue(), tempLValue, /*isInit=*/true);
  } else {
    cgf.cgm.errorNYI("copying complex into atomic lvalue");
  }
}
// Emit a "default:" case into the switch being built and leave the builder's
// insertion point inside its region.
static void emitMemOrderDefaultCaseLabel(CIRGenBuilderTy &builder,
                                         mlir::Location loc) {
  mlir::ArrayAttr ordersAttr = builder.getArrayAttr({});
  mlir::OpBuilder::InsertPoint insertPoint;
  cir::CaseOp::create(builder, loc, ordersAttr, cir::CaseOpKind::Default,
                      insertPoint);
  builder.restoreInsertionPoint(insertPoint);
}
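// Emit a "case O1, O2, ...:" label covering the given memory orders and leave
// the builder's insertion point inside its region.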
static void emitMemOrderCaseLabel(CIRGenBuilderTy &builder, mlir::Location loc,
                                  mlir::Type orderType,
                                  llvm::ArrayRef<cir::MemOrder> orders) {
  llvm::SmallVector<mlir::Attribute, 2> orderAttrs;
  for (cir::MemOrder order : orders)
    orderAttrs.push_back(
        cir::IntAttr::get(orderType, static_cast<int>(order)));
  mlir::ArrayAttr ordersAttr = builder.getArrayAttr(orderAttrs);

  mlir::OpBuilder::InsertPoint insertPoint;
  cir::CaseOp::create(builder, loc, ordersAttr, cir::CaseOpKind::Anyof,
                      insertPoint);
  builder.restoreInsertionPoint(insertPoint);
}
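// Emit a cir.atomic.cmpxchg with the given success/failure orders and handle
// its two results: on failure, write the observed value back into the
// "expected" slot; in all cases, store the success flag to dest.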
static void emitAtomicCmpXchg(CIRGenFunction &cgf, AtomicExpr *e, bool isWeak,
                              Address dest, Address ptr, Address val1,
                              Address val2, uint64_t size,
                              cir::MemOrder successOrder,
                              cir::MemOrder failureOrder) {
  CIRGenBuilderTy &builder = cgf.getBuilder();
  mlir::Location loc = cgf.getLoc(e->getSourceRange());

  mlir::Value expected = builder.createLoad(loc, val1);
  mlir::Value desired = builder.createLoad(loc, val2);

  auto cmpxchg = cir::AtomicCmpXchgOp::create(
      builder, loc, expected.getType(), builder.getBoolTy(), ptr.getPointer(),
      expected, desired,
      cir::MemOrderAttr::get(builder.getContext(), successOrder),
      cir::MemOrderAttr::get(builder.getContext(), failureOrder),
      builder.getI64IntegerAttr(ptr.getAlignment().getAsAlign().value()));

  cmpxchg.setWeak(isWeak);

  // If the compare failed, store the value observed in memory back into the
  // "expected" buffer so the caller can see it.
  mlir::Value failed = builder.createNot(cmpxchg.getSuccess());
  cir::IfOp::create(builder, loc, failed, /*withElseRegion=*/false,
                    [&](mlir::OpBuilder &, mlir::Location) {
                      auto ptrTy = mlir::cast<cir::PointerType>(
                          val1.getPointer().getType());
                      if (val1.getElementType() != ptrTy.getPointee())
                        val1 = val1.withPointer(builder.createPtrBitcast(
                            val1.getPointer(), val1.getElementType()));
                      builder.createStore(loc, cmpxchg.getOld(), val1);
                      builder.createYield(loc);
                    });

  // Update the memory at dest with the success flag.
  cgf.emitStoreOfScalar(cmpxchg.getSuccess(),
                        cgf.makeAddrLValue(dest, e->getType()),
                        /*isInit=*/false);
}
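// Map the constant failure memory order of a compare-exchange onto one that
// is legal for the failure case (never release or acq_rel), then emit the
// cmpxchg. Non-constant failure orders are not implemented yet.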
static void emitAtomicCmpXchgFailureSet(CIRGenFunction &cgf, AtomicExpr *e,
                                        bool isWeak, Address dest, Address ptr,
                                        Address val1, Address val2,
                                        Expr *failureOrderExpr, uint64_t size,
                                        cir::MemOrder successOrder) {
  Expr::EvalResult failureOrderEval;
  if (failureOrderExpr->EvaluateAsInt(failureOrderEval, cgf.getContext())) {
    uint64_t failureOrderInt = failureOrderEval.Val.getInt().getZExtValue();

    cir::MemOrder failureOrder;
    if (!cir::isValidCIRAtomicOrderingCABI(failureOrderInt)) {
      failureOrder = cir::MemOrder::Relaxed;
    } else {
      switch ((cir::MemOrder)failureOrderInt) {
      case cir::MemOrder::Relaxed:
      // The failure order may not be release or acq_rel; fall back to
      // relaxed.
      case cir::MemOrder::Release:
      case cir::MemOrder::AcquireRelease:
        failureOrder = cir::MemOrder::Relaxed;
        break;
      case cir::MemOrder::Consume:
      case cir::MemOrder::Acquire:
        failureOrder = cir::MemOrder::Acquire;
        break;
      case cir::MemOrder::SequentiallyConsistent:
        failureOrder = cir::MemOrder::SequentiallyConsistent;
        break;
      }
    }

    emitAtomicCmpXchg(cgf, e, isWeak, dest, ptr, val1, val2, size,
                      successOrder, failureOrder);
    return;
  }

  cgf.cgm.errorNYI(e->getSourceRange(),
                   "emitAtomicCmpXchgFailureSet: non-constant failure order");
}
// Emit one atomic operation for a specific, known memory order and sync
// scope. Compare-exchange forms dispatch to the cmpxchg helpers, loads and
// stores are emitted directly, and everything else goes through the generic
// read-modify-write path at the bottom.
static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
                         Address ptr, Address val1, Address val2,
                         Expr *isWeakExpr, Expr *failureOrderExpr, int64_t size,
                         cir::MemOrder order, cir::SyncScopeKind scope) {
  llvm::StringRef opName;

  CIRGenBuilderTy &builder = cgf.getBuilder();
  mlir::Location loc = cgf.getLoc(expr->getSourceRange());
  auto orderAttr = cir::MemOrderAttr::get(builder.getContext(), order);
  auto scopeAttr = cir::SyncScopeKindAttr::get(builder.getContext(), scope);
  cir::AtomicFetchKindAttr fetchAttr;
  bool fetchFirst = true;

  switch (expr->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    emitAtomicCmpXchgFailureSet(cgf, expr, /*isWeak=*/false, dest, ptr, val1,
                                val2, failureOrderExpr, size, order);
    return;

  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    emitAtomicCmpXchgFailureSet(cgf, expr, /*isWeak=*/true, dest, ptr, val1,
                                val2, failureOrderExpr, size, order);
    return;

  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    bool isWeak = false;
    if (isWeakExpr->EvaluateAsBooleanCondition(isWeak, cgf.getContext()))
      emitAtomicCmpXchgFailureSet(cgf, expr, isWeak, dest, ptr, val1, val2,
                                  failureOrderExpr, size, order);
    else
      cgf.cgm.errorNYI(expr->getSourceRange(),
                       "emitAtomicOp: non-constant isWeak");
    return;
  }
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load:
  case AtomicExpr::AO__scoped_atomic_load_n:
  case AtomicExpr::AO__scoped_atomic_load: {
    cir::LoadOp load = builder.createLoad(loc, ptr, expr->isVolatile());
    load->setAttr("mem_order", orderAttr);
    load->setAttr("sync_scope", scopeAttr);

    builder.createStore(loc, load->getResult(0), dest);
    return;
  }
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__scoped_atomic_store:
  case AtomicExpr::AO__scoped_atomic_store_n: {
    cir::LoadOp loadVal1 = builder.createLoad(loc, val1);
    builder.createStore(loc, loadVal1, ptr, expr->isVolatile(),
                        /*align=*/mlir::IntegerAttr{}, scopeAttr, orderAttr);
    return;
  }
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    opName = cir::AtomicXchgOp::getOperationName();
    break;

  // The "*_fetch" forms return the new value and the "fetch_*" forms return
  // the original value; fetchFirst records which one the op should produce.
  case AtomicExpr::AO__atomic_add_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Add);
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Sub);
    break;

  case AtomicExpr::AO__atomic_min_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_min:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Min);
    break;

  case AtomicExpr::AO__atomic_max_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_max:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Max);
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::And);
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Or);
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Xor);
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__atomic_fetch_nand:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Nand);
    break;
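  // test_and_set and clear don't fit the generic fetch/xchg scheme; they
  // lower to dedicated CIR operations below.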
  case AtomicExpr::AO__atomic_test_and_set: {
    auto op = cir::AtomicTestAndSetOp::create(
        builder, loc, ptr.getPointer(), order,
        builder.getI64IntegerAttr(ptr.getAlignment().getAsAlign().value()),
        expr->isVolatile());
    builder.createStore(loc, op, dest);
    return;
  }

  case AtomicExpr::AO__atomic_clear: {
    cir::AtomicClearOp::create(
        builder, loc, ptr.getPointer(), order,
        builder.getI64IntegerAttr(ptr.getAlignment().getAsAlign().value()),
        expr->isVolatile());
    return;
  }
  // The OpenCL, HIP, and remaining scoped variants are not implemented yet.
  case AtomicExpr::AO__opencl_atomic_init:
  case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
  case AtomicExpr::AO__scoped_atomic_compare_exchange:
  case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__hip_atomic_load:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__hip_atomic_store:
  case AtomicExpr::AO__hip_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__scoped_atomic_exchange_n:
  case AtomicExpr::AO__scoped_atomic_exchange:
  case AtomicExpr::AO__scoped_atomic_add_fetch:
  case AtomicExpr::AO__hip_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__scoped_atomic_fetch_add:
  case AtomicExpr::AO__scoped_atomic_sub_fetch:
  case AtomicExpr::AO__hip_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
  case AtomicExpr::AO__scoped_atomic_fetch_sub:
  case AtomicExpr::AO__scoped_atomic_min_fetch:
  case AtomicExpr::AO__hip_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__scoped_atomic_fetch_min:
  case AtomicExpr::AO__scoped_atomic_max_fetch:
  case AtomicExpr::AO__hip_atomic_fetch_max:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__scoped_atomic_fetch_max:
  case AtomicExpr::AO__scoped_atomic_and_fetch:
  case AtomicExpr::AO__hip_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__scoped_atomic_fetch_and:
  case AtomicExpr::AO__scoped_atomic_or_fetch:
  case AtomicExpr::AO__hip_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__scoped_atomic_fetch_or:
  case AtomicExpr::AO__scoped_atomic_xor_fetch:
  case AtomicExpr::AO__hip_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__scoped_atomic_fetch_xor:
  case AtomicExpr::AO__scoped_atomic_nand_fetch:
  case AtomicExpr::AO__scoped_atomic_fetch_nand:
  case AtomicExpr::AO__scoped_atomic_uinc_wrap:
  case AtomicExpr::AO__scoped_atomic_udec_wrap:
    cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: expr op NYI");
    return;
  }
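  // Everything that falls through to here is a read-modify-write op: load the
  // operand, build the op by name (cir.atomic.xchg or cir.atomic.fetch) with
  // the order/volatile/fetch-first decorations, and store its result.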
  assert(!opName.empty() && "expected operation name to build");

  mlir::Value loadVal1 = builder.createLoad(loc, val1);
  llvm::SmallVector<mlir::Value> atomicOperands = {ptr.getPointer(), loadVal1};
  llvm::SmallVector<mlir::Type> atomicResTys = {loadVal1.getType()};
  mlir::Operation *rmwOp = builder.create(loc, builder.getStringAttr(opName),
                                          atomicOperands, atomicResTys);

  if (fetchAttr)
    rmwOp->setAttr("binop", fetchAttr);
  rmwOp->setAttr("mem_order", orderAttr);
  if (expr->isVolatile())
    rmwOp->setAttr("is_volatile", builder.getUnitAttr());
  if (fetchFirst && opName == cir::AtomicFetchOp::getOperationName())
    rmwOp->setAttr("fetch_first", builder.getUnitAttr());

  mlir::Value result = rmwOp->getResult(0);
  builder.createStore(loc, result, dest);
}
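// Map a Clang sync scope onto the CIR sync scope kind. Only the single-thread
// and system scopes are modeled so far; anything else is reported as NYI and
// conservatively treated as system scope.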
static cir::SyncScopeKind convertSyncScopeToCIR(CIRGenFunction &cgf,
                                                SourceRange range,
                                                clang::SyncScope scope) {
  switch (scope) {
  default:
    cgf.cgm.errorNYI(range, "convertSyncScopeToCIR: unhandled sync scope");
    return cir::SyncScopeKind::System;
  case clang::SyncScope::SingleScope:
    return cir::SyncScopeKind::SingleThread;
  case clang::SyncScope::SystemScope:
    return cir::SyncScopeKind::System;
  }
}
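// Dispatch on the sync scope: no scope model means a plain C11-style op at
// system scope; a constant scope expression is mapped statically; a runtime
// scope value would need a switch over all scopes and is still NYI.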
static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
                         Address ptr, Address val1, Address val2,
                         Expr *isWeakExpr, Expr *failureOrderExpr, int64_t size,
                         cir::MemOrder order,
                         const std::optional<Expr::EvalResult> &scopeConst,
                         mlir::Value scopeValue) {
  std::unique_ptr<AtomicScopeModel> scopeModel = expr->getScopeModel();
  if (!scopeModel) {
    emitAtomicOp(cgf, expr, dest, ptr, val1, val2, isWeakExpr,
                 failureOrderExpr, size, order, cir::SyncScopeKind::System);
    return;
  }

  if (scopeConst.has_value()) {
    cir::SyncScopeKind mappedScope = convertSyncScopeToCIR(
        cgf, expr->getScope()->getSourceRange(),
        scopeModel->map(scopeConst->Val.getInt().getZExtValue()));
    emitAtomicOp(cgf, expr, dest, ptr, val1, val2, isWeakExpr,
                 failureOrderExpr, size, order, mappedScope);
    return;
  }

  cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: dynamic sync scope");
}
// Check that a constant memory order is valid for this operation: stores may
// not use acquire flavors, loads may not use release flavors.
static bool isMemOrderValid(uint64_t order, bool isStore, bool isLoad) {
  if (!cir::isValidCIRAtomicOrderingCABI(order))
    return false;
  auto memOrder = static_cast<cir::MemOrder>(order);
  if (isStore)
    return memOrder != cir::MemOrder::Consume &&
           memOrder != cir::MemOrder::Acquire &&
           memOrder != cir::MemOrder::AcquireRelease;
  if (isLoad)
    return memOrder != cir::MemOrder::Release &&
           memOrder != cir::MemOrder::AcquireRelease;
  return true;
}
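// The memory order is only known at runtime: lower to a cir.switch over the
// order value whose cases each emit the op with one fixed order, roughly:
//
//   cir.switch (%order) {
//     cir.case (default) { ... relaxed ... cir.break }
//     cir.case (anyof, [consume, acquire]) { ... acquire ... cir.break }
//     ...
//   }
//
// Orders that would be invalid for this operation get no case and fall into
// the relaxed default.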
static void emitAtomicExprWithDynamicMemOrder(
    CIRGenFunction &cgf, mlir::Value order, AtomicExpr *e, Address dest,
    Address ptr, Address val1, Address val2, Expr *isWeakExpr,
    Expr *orderFailExpr, uint64_t size, bool isStore, bool isLoad,
    const std::optional<Expr::EvalResult> &scopeConst, mlir::Value scopeValue) {
  CIRGenBuilderTy &builder = cgf.getBuilder();
  cir::SwitchOp::create(
      builder, order.getLoc(), order,
      [&](mlir::OpBuilder &, mlir::Location loc, mlir::OperationState &) {
        mlir::Block *switchBlock = builder.getBlock();

        auto emitMemOrderCase = [&](llvm::ArrayRef<cir::MemOrder> caseOrders,
                                    cir::MemOrder actualOrder) {
          if (caseOrders.empty())
            emitMemOrderDefaultCaseLabel(builder, loc);
          else
            emitMemOrderCaseLabel(builder, loc, order.getType(), caseOrders);
          emitAtomicOp(cgf, e, dest, ptr, val1, val2, isWeakExpr,
                       orderFailExpr, size, actualOrder, scopeConst,
                       scopeValue);
          builder.createBreak(loc);
          builder.setInsertionPointToEnd(switchBlock);
        };

        // default: an invalid order falls back to relaxed.
        emitMemOrderCase({}, cir::MemOrder::Relaxed);

        // Acquire flavors are not valid for stores.
        if (!isStore)
          emitMemOrderCase({cir::MemOrder::Consume, cir::MemOrder::Acquire},
                           cir::MemOrder::Acquire);

        // Release is not valid for loads.
        if (!isLoad)
          emitMemOrderCase({cir::MemOrder::Release}, cir::MemOrder::Release);

        // Acq_rel is only valid for read-modify-write operations.
        if (!isLoad && !isStore) {
          emitMemOrderCase({cir::MemOrder::AcquireRelease},
                           cir::MemOrder::AcquireRelease);
        }

        emitMemOrderCase({cir::MemOrder::SequentiallyConsistent},
                         cir::MemOrder::SequentiallyConsistent);

        builder.createYield(loc);
      });
}
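// Entry point for lowering an AtomicExpr: gather operand addresses, evaluate
// the order/scope operands (keeping their constant values when foldable),
// normalize operands to integer pointers of the atomic width where needed,
// and dispatch to the constant-order or dynamic-order emission path.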
RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
  QualType atomicTy = e->getPtr()->getType()->getPointeeType();
  QualType memTy = atomicTy;
  if (const auto *ty = atomicTy->getAs<AtomicType>())
    memTy = ty->getValueType();

  Expr *isWeakExpr = nullptr;
  Expr *orderFailExpr = nullptr;

  Address val1 = Address::invalid();
  Address val2 = Address::invalid();
  Address dest = Address::invalid();
  Address ptr = emitPointerWithAlignment(e->getPtr());

  if (e->getOp() == AtomicExpr::AO__c11_atomic_init) {
    LValue lvalue = makeAddrLValue(ptr, atomicTy);
    emitAtomicInit(e->getVal1(), lvalue);
    return RValue::get(nullptr);
  }

  TypeInfoChars typeInfo = getContext().getTypeInfoInChars(atomicTy);
  uint64_t size = typeInfo.Width.getQuantity();

  std::optional<Expr::EvalResult> orderConst;
  mlir::Value order = emitScalarExpr(e->getOrder());
  if (Expr::EvalResult eval; e->getOrder()->EvaluateAsInt(eval, getContext()))
    orderConst.emplace(std::move(eval));

  std::optional<Expr::EvalResult> scopeConst;
  mlir::Value scope =
      e->getScopeModel() ? emitScalarExpr(e->getScope()) : mlir::Value{};
  if (Expr::EvalResult eval; e->getScopeModel() &&
                             e->getScope()->EvaluateAsInt(eval, getContext()))
    scopeConst.emplace(std::move(eval));

  bool shouldCastToIntPtrTy = true;

  switch (e->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("already handled above with emitAtomicInit");

  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__scoped_atomic_load_n:
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_test_and_set:
  case AtomicExpr::AO__atomic_clear:
    break;

  case AtomicExpr::AO__atomic_load:
  case AtomicExpr::AO__scoped_atomic_load:
    dest = emitPointerWithAlignment(e->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__scoped_atomic_store:
    val1 = emitPointerWithAlignment(e->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    val1 = emitPointerWithAlignment(e->getVal1());
    dest = emitPointerWithAlignment(e->getVal2());
    break;
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    val1 = emitPointerWithAlignment(e->getVal1());
    if (e->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
        e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
      val2 = emitPointerWithAlignment(e->getVal2());
    else
      val2 = emitValToTemp(*this, e->getVal2());
    orderFailExpr = e->getOrderFail();
    if (e->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
        e->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
        e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange_n ||
        e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
      isWeakExpr = e->getWeak();
    break;
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (memTy->isPointerType()) {
      cgm.errorNYI(e->getSourceRange(),
                   "atomic fetch-and-add and fetch-and-sub for pointers");
      return RValue::get(nullptr);
    }
    [[fallthrough]];
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__scoped_atomic_store_n:
    val1 = emitValToTemp(*this, e->getVal1());
    break;
  }
  QualType rValTy = e->getType().getUnqualifiedType();

  LValue atomicVal = makeAddrLValue(ptr, atomicTy);
  AtomicInfo atomics(*this, atomicVal, getLoc(e->getSourceRange()));

  if (shouldCastToIntPtrTy) {
    ptr = atomics.castToAtomicIntPointer(ptr);
    if (val1.isValid())
      val1 = atomics.convertToAtomicIntPointer(val1);
    if (val2.isValid())
      val2 = atomics.convertToAtomicIntPointer(val2);
  }
  if (dest.isValid()) {
    if (shouldCastToIntPtrTy)
      dest = atomics.castToAtomicIntPointer(dest);
  } else if (e->isCmpXChg()) {
    dest = createMemTemp(rValTy, getLoc(e->getSourceRange()), "cmpxchg.bool");
  } else if (e->getOp() == AtomicExpr::AO__atomic_test_and_set) {
    dest = createMemTemp(rValTy, getLoc(e->getSourceRange()),
                         "test_and_set.bool");
  } else if (!rValTy->isVoidType()) {
    dest = atomics.createTempAlloca();
    if (shouldCastToIntPtrTy)
      dest = atomics.castToAtomicIntPointer(dest);
  }

  // Non-power-of-two or oversized atomics can't be lowered to a single
  // instruction and would need the runtime library; that path is still NYI.
  bool powerOf2Size = (size & (size - 1)) == 0;
  bool useLibCall = !powerOf2Size || (size > 16);
  if (useLibCall) {
    cgm.errorNYI(e->getSourceRange(), "emitAtomicExpr: atomic via libcall");
    return RValue::get(nullptr);
  }
  bool isStore = e->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 e->getOp() == AtomicExpr::AO__opencl_atomic_store ||
                 e->getOp() == AtomicExpr::AO__hip_atomic_store ||
                 e->getOp() == AtomicExpr::AO__atomic_store ||
                 e->getOp() == AtomicExpr::AO__atomic_store_n ||
                 e->getOp() == AtomicExpr::AO__scoped_atomic_store ||
                 e->getOp() == AtomicExpr::AO__scoped_atomic_store_n ||
                 e->getOp() == AtomicExpr::AO__atomic_clear;
  bool isLoad = e->getOp() == AtomicExpr::AO__c11_atomic_load ||
                e->getOp() == AtomicExpr::AO__opencl_atomic_load ||
                e->getOp() == AtomicExpr::AO__hip_atomic_load ||
                e->getOp() == AtomicExpr::AO__atomic_load ||
                e->getOp() == AtomicExpr::AO__atomic_load_n ||
                e->getOp() == AtomicExpr::AO__scoped_atomic_load ||
                e->getOp() == AtomicExpr::AO__scoped_atomic_load_n;
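  // With a constant order the runtime switch is unnecessary: either the order
  // is valid for this operation and the op is emitted directly, or the
  // program has undefined behavior and nothing needs to be emitted at all.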
  if (orderConst.has_value()) {
    uint64_t ord = orderConst->Val.getInt().getZExtValue();
    if (isMemOrderValid(ord, isStore, isLoad))
      emitAtomicOp(*this, e, dest, ptr, val1, val2, isWeakExpr, orderFailExpr,
                   size, static_cast<cir::MemOrder>(ord), scopeConst, scope);
  } else {
    emitAtomicExprWithDynamicMemOrder(*this, order, e, dest, ptr, val1, val2,
                                      isWeakExpr, orderFailExpr, size, isStore,
                                      isLoad, scopeConst, scope);
  }

  if (rValTy->isVoidType())
    return RValue::get(nullptr);

  return convertTempToRValue(
      dest.withElementType(builder, convertTypeForMem(rValTy)), rValTy,
      e->getExprLoc());
}
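// Store an rvalue into an atomic lvalue. The convenience overload defaults to
// sequentially consistent ordering; the full form honors an explicit order
// and volatility, and will eventually fall back to a library call for
// non-lock-free sizes.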
void CIRGenFunction::emitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
  bool isVolatile = dest.isVolatileQualified();
  auto order = cir::MemOrder::SequentiallyConsistent;
  emitAtomicStore(rvalue, dest, order, isVolatile, isInit);
}

void CIRGenFunction::emitAtomicStore(RValue rvalue, LValue dest,
                                     cir::MemOrder order, bool isVolatile,
                                     bool isInit) {
  // If this is an aggregate r-value, it should agree in type except maybe for
  // address-space qualification.
  mlir::Location loc = dest.getPointer().getLoc();
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddress().getElementType() ==
             dest.getAddress().getElementType());

  AtomicInfo atomics(*this, dest, loc);
  LValue lvalue = atomics.getAtomicLValue();

  if (lvalue.isSimple()) {
    // If this is an initialization, just put the value there normally.
    if (isInit) {
      atomics.emitCopyIntoMemory(rvalue);
      return;
    }

    // Check whether we should use a library call.
    if (atomics.shouldUseLibCall()) {
      cgm.errorNYI(loc, "emitAtomicStore: atomic store with library call");
      return;
    }

    // Okay, we're doing this natively.
    mlir::Value valueToStore = atomics.convertRValueToInt(rvalue);

    // Do the atomic store.
    Address addr = atomics.getAtomicAddress();
    if (mlir::Value value = atomics.getScalarRValValueOrNull(rvalue)) {
      if (shouldCastToInt(value.getType(), /*cmpxchg=*/false))
        addr = atomics.castToAtomicIntPointer(addr);
    }
    cir::StoreOp store = builder.createStore(loc, valueToStore, addr);

    // Initializations don't need to be atomic.
    if (!isInit)
      store.setMemOrder(order);

    // Other decoration.
    if (isVolatile)
      store.setIsVolatile(true);
    return;
  }

  cgm.errorNYI(loc, "emitAtomicStore: non-simple atomic lvalue");
}
// Emit the initialization of an atomic object, choosing a strategy based on
// how the value type is evaluated.
void CIRGenFunction::emitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest, getLoc(init->getSourceRange()));

  switch (atomics.getEvaluationKind()) {
  case cir::TEK_Scalar:
    atomics.emitCopyIntoMemory(RValue::get(emitScalarExpr(init)));
    return;

  case cir::TEK_Complex:
    atomics.emitCopyIntoMemory(RValue::get(emitComplexExpr(init)));
    return;

  case cir::TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression of
    // atomic type.
    bool zeroed = false;
    if (!init->getType()->isAtomicType()) {
      zeroed = atomics.emitMemSetZeroIfNecessary();
      dest = atomics.projectValue();
    }

    // Evaluate the expression directly into the destination.
    AggValueSlot slot = AggValueSlot::forLValue(
        dest, AggValueSlot::IsNotDestructed, AggValueSlot::IsNotAliased,
        AggValueSlot::DoesNotOverlap,
        zeroed ? AggValueSlot::IsZeroed : AggValueSlot::IsNotZeroed);
    emitAggExpr(init, slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}