4 changes: 2 additions & 2 deletions clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
@@ -175,8 +175,8 @@ class CIRBaseBuilderTy : public mlir::OpBuilder {
return cir::LoadOp::create(*this, loc, ptr, /*isDeref=*/false, isVolatile,
isNontemporal,
/*alignment=*/alignmentAttr,
/*mem_order=*/
cir::MemOrderAttr{},
/*syncscope=*/cir::MemScopeKindAttr{},
/*mem_order=*/cir::MemOrderAttr{},
/*tbaa=*/cir::TBAAAttr{});
}

28 changes: 18 additions & 10 deletions clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -533,6 +533,15 @@ def CIR_MemOrder : CIR_I32EnumAttr<
I32EnumAttrCase<"SequentiallyConsistent", 5, "seq_cst">
]>;

//===----------------------------------------------------------------------===//
// C/C++ sync scope definitions
//===----------------------------------------------------------------------===//

def CIR_MemScopeKind : CIR_I32EnumAttr<"MemScopeKind", "memory scope kind", [
I32EnumAttrCase<"SingleThread", 0, "single_thread">,
I32EnumAttrCase<"System", 1, "system">
]>;
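The enum definition itself is unchanged; it is hoisted here from its old spot next to `CIR_AtomicCmpXchg` (see the deletion further down) so that `CIR_LoadOp` can reference it. A rough sketch of how the TableGen-generated C++ gets used (helper names follow the usual MLIR enum-attribute conventions; they are an assumption, not part of this patch):

```c++
// Sketch: working with the generated enum and its attribute wrapper.
void memScopeExample(mlir::MLIRContext *ctx) {
  cir::MemScopeKind kind = cir::MemScopeKind::SingleThread;
  // Attribute form, as stored in the op's optional $syncscope slot.
  auto attr = cir::MemScopeKindAttr::get(ctx, kind);
  // Assembly mnemonic, as printed by `syncscope(...)`: "single_thread".
  llvm::StringRef name = cir::stringifyMemScopeKind(kind);
  (void)attr;
  (void)name;
}
```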

//===----------------------------------------------------------------------===//
// AllocaOp
//===----------------------------------------------------------------------===//
@@ -670,17 +679,19 @@ def CIR_LoadOp : CIR_Op<"load", [
%4 = cir.load volatile %0 : !cir.ptr<i32>, i32

// Others
%x = cir.load align(16) atomic(seq_cst) %0 : !cir.ptr<i32>, i32
%x = cir.load align(16) syncscope(single_thread) atomic(seq_cst)
%0 : !cir.ptr<i32>, i32
```
}];

let arguments = (ins Arg<CIR_PointerType, "the address to load from",
[MemRead]>:$addr, UnitAttr:$isDeref,
UnitAttr:$is_volatile,
UnitAttr:$is_nontemporal,
OptionalAttr<I64Attr>:$alignment,
OptionalAttr<CIR_MemOrder>:$mem_order,
OptionalAttr<CIR_AnyTBAAAttr>:$tbaa
OptionalAttr<I64Attr>:$alignment,
OptionalAttr<CIR_MemScopeKind>:$syncscope,
OptionalAttr<CIR_MemOrder>:$mem_order,
OptionalAttr<CIR_AnyTBAAAttr>:$tbaa
);
let results = (outs CIR_AnyType:$result);

@@ -689,6 +700,7 @@ def CIR_LoadOp : CIR_Op<"load", [
(`volatile` $is_volatile^)?
(`nontemporal` $is_nontemporal^)?
(`align` `(` $alignment^ `)`)?
(`syncscope` `(` $syncscope^ `)`)?
(`atomic` `(` $mem_order^ `)`)?
$addr `:` qualified(type($addr)) `,` type($result) attr-dict
(`tbaa` `(` $tbaa^ `)`)?
@@ -698,7 +710,8 @@ def CIR_LoadOp : CIR_Op<"load", [
// TODO(CIR): The final interface here should include an argument for the
// SyncScope::ID.
// This should be used over the ODS generated setMemOrder.
void setAtomic(cir::MemOrder order);
void setAtomic(cir::MemOrder order,
cir::MemScopeKind scope);
}];
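A minimal usage sketch of the new two-argument form, mirroring the AArch64 builtin change later in this diff; callers with no source-level scope information pass the system scope explicitly:

```c++
// Sketch: mark an existing cir.load as an atomic acquire at system scope.
void makeAcquireLoad(cir::LoadOp load) {
  load.setAtomic(cir::MemOrder::Acquire, cir::MemScopeKind::System);
}
```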

// FIXME: add verifier.
@@ -6093,11 +6106,6 @@ def CIR_AtomicXchg : CIR_Op<"atomic.xchg", [
let hasVerifier = 1;
}

def CIR_MemScopeKind : CIR_I32EnumAttr<"MemScopeKind", "memory scope kind", [
I32EnumAttrCase<"SingleThread", 0, "single_thread">,
I32EnumAttrCase<"System", 1, "system">
]>;

def CIR_AtomicCmpXchg : CIR_Op<"atomic.cmp_xchg", [
AllTypesMatch<["old", "expected", "desired"]>
]> {
26 changes: 21 additions & 5 deletions clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
@@ -17,6 +17,7 @@
#include "CIRGenOpenMPRuntime.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/Basic/SyncScope.h"
#include "clang/CIR/Dialect/IR/CIRAttrs.h"
#include "clang/CIR/Dialect/IR/CIRDataLayout.h"
#include "clang/CIR/Dialect/IR/CIRDialect.h"
@@ -350,6 +351,20 @@ static cir::IntAttr extractIntAttr(mlir::Value v) {
return {};
}

// Map a clang::SyncScope to the corresponding CIR memory scope kind.
// Only the single-thread and system scopes have CIR equivalents so far;
// any other scope is not yet implemented and hits llvm_unreachable.
static cir::MemScopeKind convertSyncScopeToCIR(clang::SyncScope scope) {
switch (scope) {
case clang::SyncScope::SingleScope:
return cir::MemScopeKind::SingleThread;
case clang::SyncScope::SystemScope:
return cir::MemScopeKind::System;
default:
llvm_unreachable("NYI");
}
}
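Note that the scope model can also produce device, workgroup, and wavefront scopes, which currently fall through to the `NYI` path. A call that would exercise it, assuming the generic scope model used by the `__scoped_atomic_*` builtins:

```c++
// Hypothetical: __MEMORY_SCOPE_DEVICE maps to clang::SyncScope::DeviceScope,
// which has no cir::MemScopeKind equivalent yet, so CIRGen would hit the
// llvm_unreachable above.
int loadDeviceScoped(int *p) {
  return __scoped_atomic_load_n(p, __ATOMIC_RELAXED, __MEMORY_SCOPE_DEVICE);
}
```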

// Inspect a value that is the strong/weak flag for a compare-exchange. If it
// is a constant of integral or boolean type, set `val` to the constant's
// boolean value and return true. Otherwise leave `val` unchanged and return
@@ -594,9 +609,7 @@ static void emitAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest,
case AtomicExpr::AO__scoped_atomic_load_n:
case AtomicExpr::AO__scoped_atomic_load: {
auto load = builder.createLoad(loc, Ptr);
// FIXME(cir): add scope information.
assert(!cir::MissingFeatures::syncScopeID());
load.setMemOrder(Order);
load.setAtomic(Order, Scope);
load.setIsVolatile(E->isVolatile());

// TODO(cir): this logic should be part of createStore, but doing so
@@ -818,9 +831,12 @@
}

// Handle constant scope.
if (extractIntAttr(Scope)) {
if (auto scopeAttr = extractIntAttr(Scope)) {
assert(!cir::MissingFeatures::syncScopeID());
llvm_unreachable("NYI");
auto mappedScope =
convertSyncScopeToCIR(ScopeModel->map(scopeAttr.getUInt()));
emitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
Order, mappedScope);
return;
}

1 change: 1 addition & 0 deletions clang/lib/CIR/CodeGen/CIRGenBuilder.h
@@ -870,6 +870,7 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy {
return cir::LoadOp::create(
*this, loc, addr.getElementType(), addr.getPointer(), /*isDeref=*/false,
/*is_volatile=*/isVolatile, /*is_nontemporal=*/isNontemporal, align,
/*syncscope=*/cir::MemScopeKindAttr{},
/*mem_order=*/cir::MemOrderAttr{}, /*tbaa=*/cir::TBAAAttr{});
}

3 changes: 2 additions & 1 deletion clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
@@ -18,6 +18,7 @@
#include "CIRGenFunction.h"
#include "CIRGenModule.h"
#include "TargetInfo.h"
#include "clang/CIR/Dialect/IR/CIROpsEnums.h"
#include "clang/CIR/MissingFeatures.h"

// TODO(cir): once all builtins are covered, decide whether we still
@@ -4485,7 +4486,7 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E,
case NEON::BI__builtin_neon_vldap1q_lane_s64: {
cir::LoadOp Load = builder.createAlignedLoad(
Ops[0].getLoc(), vTy.getElementType(), Ops[0], PtrOp0.getAlignment());
Load.setAtomic(cir::MemOrder::Acquire);
Load.setAtomic(cir::MemOrder::Acquire, cir::MemScopeKind::System);
return builder.create<cir::VecInsertOp>(getLoc(E->getExprLoc()),
builder.createBitcast(Ops[1], vTy),
Load, Ops[2]);
7 changes: 2 additions & 5 deletions clang/lib/CIR/Dialect/IR/CIRDialect.cpp
@@ -1012,12 +1012,9 @@ LogicalResult cir::ComplexImagPtrOp::verify() {
// LoadOp
//===----------------------------------------------------------------------===//

// TODO(CIR): The final interface here should include an argument for the
// SyncScope::ID.
void cir::LoadOp::setAtomic(cir::MemOrder order) {
void cir::LoadOp::setAtomic(cir::MemOrder order, cir::MemScopeKind scope) {
setMemOrder(order);
if (cir::MissingFeatures::syncScopeID())
llvm_unreachable("NYI");
setSyncscope(scope);
}

//===----------------------------------------------------------------------===//
@@ -383,7 +383,9 @@ void ItaniumCXXABI::lowerGetMethod(
mlir::Value vtablePtr = rewriter.create<cir::LoadOp>(
op.getLoc(), vtablePtrPtr, /*isDeref=*/false, /*isVolatile=*/false,
/*isNontemporal=*/false,
/*alignment=*/mlir::IntegerAttr(), /*mem_order=*/cir::MemOrderAttr(),
/*alignment=*/mlir::IntegerAttr(),
/*syncscope=*/cir::MemScopeKindAttr{},
/*mem_order=*/cir::MemOrderAttr(),
/*tbaa=*/mlir::ArrayAttr());

// Get the vtable offset.
@@ -418,6 +420,7 @@ void ItaniumCXXABI::lowerGetMethod(
op.getLoc(), vfpPtr, /*isDeref=*/false, /*isVolatile=*/false,
/*isNontemporal=*/false,
/*alignment=*/mlir::IntegerAttr(),
/*syncscope=*/cir::MemScopeKindAttr{},
/*mem_order=*/cir::MemOrderAttr(),
/*tbaa=*/mlir::ArrayAttr());
}
2 changes: 2 additions & 0 deletions clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -1824,6 +1824,8 @@ mlir::LogicalResult CIRToLLVMLoadOpLowering::matchAndRewrite(
op->getLoc(), llvmTy, adaptor.getAddr(), /* alignment */ alignment,
op.getIsVolatile(), /* nontemporal */ op.getIsNontemporal(),
/* invariant */ false, /* invariantGroup */ invariant, ordering);
if (auto scope = op.getSyncscope())
newLoad.setSyncscope(getLLVMSyncScope(scope));

// Convert adapted result to its original type if needed.
mlir::Value result =
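`getLLVMSyncScope` is not added by this patch, so it is evidently already available in this file (the `cir.atomic.cmp_xchg` lowering emits `syncscope("singlethread")` per the existing test below). A plausible shape, consistent with the tests (a sketch under those assumptions, not the verbatim implementation):

```c++
// LLVM spells the single-thread scope "singlethread"; the system scope is
// LLVM's default and is expressed by omitting syncscope altogether, which
// is why the system-scope tests check `LLVM-NOT: syncscope(`.
static std::optional<llvm::StringRef>
getLLVMSyncScope(std::optional<cir::MemScopeKind> scope) {
  if (scope == cir::MemScopeKind::SingleThread)
    return llvm::StringRef("singlethread");
  return std::nullopt; // system or absent: default scope, no syncscope
}
```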
1 change: 1 addition & 0 deletions clang/lib/CIR/Lowering/ThroughMLIR/LowerCIRLoopToSCF.cpp
@@ -482,6 +482,7 @@ class CIRWhileOpLowering : public mlir::OpConversionPattern<cir::WhileOp> {
auto cond = rewriter.create<LoadOp>(
loc, boolTy, condAlloca, /*isDeref=*/false,
/*volatile=*/false, /*nontemporal=*/false, alignment,
/*syncscope=*/cir::MemScopeKindAttr{},
/*memorder=*/cir::MemOrderAttr{}, /*tbaa=*/cir::TBAAAttr{});
auto ifnot =
rewriter.create<IfOp>(loc, cond, /*withElseRegion=*/false,
16 changes: 8 additions & 8 deletions clang/test/CIR/CodeGen/AArch64/neon-ldst.c
@@ -637,7 +637,7 @@ uint64x2_t test_vldap1q_lane_u64(uint64_t *a, uint64x2_t b) {
// CIR-LABEL:test_vldap1q_lane_u64
// CIR: [[LANE:%.*]] = cir.const #cir.int<1> : !s32i
// CIR: [[TMP0:%.*]] = cir.cast bitcast {{.*}} : !cir.ptr<!void> -> !cir.ptr<!u64i>
// CIR: [[VAL:%.*]] = cir.load align(8) atomic(acquire) [[TMP0]] : !cir.ptr<!u64i>, !u64i
// CIR: [[VAL:%.*]] = cir.load align(8) syncscope(system) atomic(acquire) [[TMP0]] : !cir.ptr<!u64i>, !u64i
// CIR: [[VEC:%.*]] = cir.cast bitcast {{.*}} : !cir.vector<!s8i x 16> -> !cir.vector<!u64i x 2>
// CIR: [[TMP:%.*]] = cir.vec.insert [[VAL]], {{.*}}[[[LANE]] : !s32i] : !cir.vector<!u64i x 2>

// CIR-LABEL:test_vldap1q_lane_s64
// CIR: [[LANE:%.*]] = cir.const #cir.int<1> : !s32i
// CIR: [[TMP0:%.*]] = cir.cast bitcast {{.*}} : !cir.ptr<!void> -> !cir.ptr<!s64i>
// CIR: [[VAL:%.*]] = cir.load align(8) atomic(acquire) [[TMP0]] : !cir.ptr<!s64i>, !s64i
// CIR: [[VAL:%.*]] = cir.load align(8) syncscope(system) atomic(acquire) [[TMP0]] : !cir.ptr<!s64i>, !s64i
// CIR: [[VEC:%.*]] = cir.cast bitcast {{.*}} : !cir.vector<!s8i x 16> -> !cir.vector<!s64i x 2>
// CIR: [[TMP:%.*]] = cir.vec.insert [[VAL]], {{.*}}[[[LANE]] : !s32i] : !cir.vector<!s64i x 2>

// CIR-LABEL:test_vldap1q_lane_f64
// CIR: [[LANE:%.*]] = cir.const #cir.int<1> : !s32i
// CIR: [[TMP0:%.*]] = cir.cast bitcast {{.*}} : !cir.ptr<!void> -> !cir.ptr<!cir.double>
// CIR: [[VAL:%.*]] = cir.load align(8) atomic(acquire) [[TMP0]] : !cir.ptr<!cir.double>, !cir.double
// CIR: [[VAL:%.*]] = cir.load align(8) syncscope(system) atomic(acquire) [[TMP0]] : !cir.ptr<!cir.double>, !cir.double
// CIR: [[VEC:%.*]] = cir.cast bitcast {{.*}} : !cir.vector<!s8i x 16> -> !cir.vector<!cir.double x 2>
// CIR: [[TMP:%.*]] = cir.vec.insert [[VAL]], {{.*}}[[[LANE]] : !s32i] : !cir.vector<!cir.double x 2>

// CIR-LABEL:test_vldap1q_lane_p64
// CIR: [[LANE:%.*]] = cir.const #cir.int<1> : !s32i
// CIR: [[TMP0:%.*]] = cir.cast bitcast {{.*}} : !cir.ptr<!void> -> !cir.ptr<!s64i>
// CIR: [[VAL:%.*]] = cir.load align(8) atomic(acquire) [[TMP0]] : !cir.ptr<!s64i>, !s64i
// CIR: [[VAL:%.*]] = cir.load align(8) syncscope(system) atomic(acquire) [[TMP0]] : !cir.ptr<!s64i>, !s64i
// CIR: [[VEC:%.*]] = cir.cast bitcast {{.*}} : !cir.vector<!s8i x 16> -> !cir.vector<!s64i x 2>
// CIR: [[TMP:%.*]] = cir.vec.insert [[VAL]], {{.*}}[[[LANE]] : !s32i] : !cir.vector<!s64i x 2>

// CIR-LABEL:test_vldap1_lane_u64
// CIR: [[LANE:%.*]] = cir.const #cir.int<0> : !s32i
// CIR: [[TMP0:%.*]] = cir.cast bitcast {{.*}} : !cir.ptr<!void> -> !cir.ptr<!u64i>
// CIR: [[VAL:%.*]] = cir.load align(8) atomic(acquire) [[TMP0]] : !cir.ptr<!u64i>, !u64i
// CIR: [[VAL:%.*]] = cir.load align(8) syncscope(system) atomic(acquire) [[TMP0]] : !cir.ptr<!u64i>, !u64i
// CIR: [[VEC:%.*]] = cir.cast bitcast {{.*}} : !cir.vector<!s8i x 8> -> !cir.vector<!u64i x 1>
// CIR: [[TMP:%.*]] = cir.vec.insert [[VAL]], {{.*}}[[[LANE]] : !s32i] : !cir.vector<!u64i x 1>

// CIR-LABEL:test_vldap1_lane_s64
// CIR: [[LANE:%.*]] = cir.const #cir.int<0> : !s32i
// CIR: [[TMP0:%.*]] = cir.cast bitcast {{.*}} : !cir.ptr<!void> -> !cir.ptr<!s64i>
// CIR: [[VAL:%.*]] = cir.load align(8) atomic(acquire) [[TMP0]] : !cir.ptr<!s64i>, !s64i
// CIR: [[VAL:%.*]] = cir.load align(8) syncscope(system) atomic(acquire) [[TMP0]] : !cir.ptr<!s64i>, !s64i
// CIR: [[VEC:%.*]] = cir.cast bitcast {{.*}} : !cir.vector<!s8i x 8> -> !cir.vector<!s64i x 1>
// CIR: [[TMP:%.*]] = cir.vec.insert [[VAL]], {{.*}}[[[LANE]] : !s32i] : !cir.vector<!s64i x 1>

// CIR-LABEL: test_vldap1_lane_f64
// CIR: [[LANE:%.*]] = cir.const #cir.int<0> : !s32i
// CIR: [[TMP0:%.*]] = cir.cast bitcast {{.*}} : !cir.ptr<!void> -> !cir.ptr<!cir.double>
// CIR: [[VAL:%.*]] = cir.load align(8) atomic(acquire) [[TMP0]] : !cir.ptr<!cir.double>, !cir.double
// CIR: [[VAL:%.*]] = cir.load align(8) syncscope(system) atomic(acquire) [[TMP0]] : !cir.ptr<!cir.double>, !cir.double
// CIR: [[VEC:%.*]] = cir.cast bitcast {{.*}} : !cir.vector<!s8i x 8> -> !cir.vector<!cir.double x 1>
// CIR: [[TMP:%.*]] = cir.vec.insert [[VAL]], {{.*}}[[[LANE]] : !s32i] : !cir.vector<!cir.double x 1>

// CIR-LABEL: test_vldap1_lane_p64
// CIR: [[LANE:%.*]] = cir.const #cir.int<0> : !s32i
// CIR: [[TMP0:%.*]] = cir.cast bitcast {{.*}} : !cir.ptr<!void> -> !cir.ptr<!s64i>
// CIR: [[VAL:%.*]] = cir.load align(8) atomic(acquire) [[TMP0]] : !cir.ptr<!s64i>, !s64i
// CIR: [[VAL:%.*]] = cir.load align(8) syncscope(system) atomic(acquire) [[TMP0]] : !cir.ptr<!s64i>, !s64i
// CIR: [[VEC:%.*]] = cir.cast bitcast {{.*}} : !cir.vector<!s8i x 8> -> !cir.vector<!s64i x 1>
// CIR: [[TMP:%.*]] = cir.vec.insert [[VAL]], {{.*}}[[[LANE]] : !s32i] : !cir.vector<!s64i x 1>

35 changes: 35 additions & 0 deletions clang/test/CIR/CodeGen/scoped-atomic-load-store.c
@@ -0,0 +1,35 @@
// RUN: %clang_cc1 -x c -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir
// RUN: FileCheck --input-file=%t.cir %s -check-prefix=CIR
// RUN: %clang_cc1 -x c -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t-cir.ll
// RUN: FileCheck --input-file=%t-cir.ll %s -check-prefix=LLVM
// RUN: %clang_cc1 -x c -triple x86_64-unknown-linux-gnu -emit-llvm %s -o %t.ll
// RUN: FileCheck --input-file=%t.ll %s -check-prefix=OGCG

int scoped_load_thread(int *ptr) {
return __scoped_atomic_load_n(ptr, __ATOMIC_RELAXED, __MEMORY_SCOPE_SINGLE);
}

// CIR-LABEL: @scoped_load_thread
// CIR: %[[ATOMIC_LOAD:.*]] = cir.load align(4) syncscope(single_thread) atomic(relaxed) %{{.*}} : !cir.ptr<!s32i>, !s32i
// CIR: cir.store align(4) %[[ATOMIC_LOAD]], %{{.*}} : !s32i, !cir.ptr<!s32i>

// LLVM-LABEL: @scoped_load_thread
// LLVM: load atomic i32, ptr %{{.*}} syncscope("singlethread") monotonic, align 4

// OGCG-LABEL: @scoped_load_thread
// OGCG: load atomic i32, ptr %{{.*}} monotonic, align 4

int scoped_load_system(int *ptr) {
return __scoped_atomic_load_n(ptr, __ATOMIC_SEQ_CST, __MEMORY_SCOPE_SYSTEM);
}

// CIR-LABEL: @scoped_load_system
// CIR: cir.load align(4) syncscope(system) atomic(seq_cst) %{{.*}} : !cir.ptr<!s32i>, !s32i

// LLVM-LABEL: @scoped_load_system
// LLVM: load atomic i32, ptr %{{.*}} seq_cst, align 4
// LLVM-NOT: syncscope(

// OGCG-LABEL: @scoped_load_system
// OGCG: load atomic i32, ptr %{{.*}} seq_cst, align 4
// OGCG-NOT: syncscope(
41 changes: 29 additions & 12 deletions clang/test/CIR/Lowering/syncscope.cir
@@ -1,12 +1,29 @@
// RUN: cir-translate %s -cir-to-llvmir --disable-cc-lowering -o - | FileCheck %s -check-prefix=LLVM

!s32i = !cir.int<s, 32>
#fn_attr = #cir<extra({inline = #cir.inline<no>, nothrow = #cir.nothrow, optnone = #cir.optnone})>
module {
cir.func @test(%ptr: !cir.ptr<!s32i>, %expected: !s32i, %desired: !s32i) -> !cir.bool extra(#fn_attr) {
%old, %cmp = cir.atomic.cmp_xchg(%ptr : !cir.ptr<!s32i>, %expected : !s32i, %desired : !s32i, success = acquire, failure = acquire) syncscope(single_thread) align(4) : (!s32i, !cir.bool)
cir.return %cmp: !cir.bool
}
}

// LLVM: {{%.*}} = cmpxchg ptr {{%.*}}, i32 {{%.*}}, i32 {{%.*}} syncscope("singlethread") acquire acquire, align 4
// RUN: cir-translate %s -cir-to-llvmir --disable-cc-lowering -o - | FileCheck %s -check-prefix=LLVM

!s32i = !cir.int<s, 32>
#fn_attr = #cir<extra({inline = #cir.inline<no>, nothrow = #cir.nothrow, optnone = #cir.optnone})>
module {
cir.func @test(%ptr: !cir.ptr<!s32i>, %expected: !s32i, %desired: !s32i) -> !cir.bool extra(#fn_attr) {
%old, %cmp = cir.atomic.cmp_xchg(%ptr : !cir.ptr<!s32i>, %expected : !s32i, %desired : !s32i, success = acquire, failure = acquire) syncscope(single_thread) align(4) : (!s32i, !cir.bool)
cir.return %cmp: !cir.bool
}

cir.func @load(%ptr: !cir.ptr<!s32i>) -> !s32i extra(#fn_attr) {
%val = cir.load syncscope(single_thread) atomic(relaxed) %ptr : !cir.ptr<!s32i>, !s32i
cir.return %val : !s32i
}


// System scope should not materialize a syncscope attribute.
cir.func @system_load(%ptr: !cir.ptr<!s32i>) -> !s32i extra(#fn_attr) {
%val = cir.load atomic(seq_cst) %ptr : !cir.ptr<!s32i>, !s32i
cir.return %val : !s32i
}

}

// LLVM: {{%.*}} = cmpxchg ptr {{%.*}}, i32 {{%.*}}, i32 {{%.*}} syncscope("singlethread") acquire acquire, align 4
// LLVM: load atomic i32, ptr {{%.*}} syncscope("singlethread") monotonic, align 4
// LLVM-LABEL: @system_load
// LLVM: load atomic i32, ptr {{%.*}} seq_cst, align 4
// LLVM-NOT: syncscope(