Diffstat (limited to 'test/CodeGen/AArch64/arm64-atomic.ll')
-rw-r--r--  test/CodeGen/AArch64/arm64-atomic.ll | 194
1 file changed, 104 insertions, 90 deletions
diff --git a/test/CodeGen/AArch64/arm64-atomic.ll b/test/CodeGen/AArch64/arm64-atomic.ll
index b56f91ddd111..9136fb6271b5 100644
--- a/test/CodeGen/AArch64/arm64-atomic.ll
+++ b/test/CodeGen/AArch64/arm64-atomic.ll
@@ -1,37 +1,49 @@
-; RUN: llc < %s -march=arm64 -verify-machineinstrs -mcpu=cyclone | FileCheck %s
+; RUN: llc < %s -march=arm64 -asm-verbose=false -verify-machineinstrs -mcpu=cyclone | FileCheck %s

-define i32 @val_compare_and_swap(i32* %p) {
+define i32 @val_compare_and_swap(i32* %p, i32 %cmp, i32 %new) #0 {
 ; CHECK-LABEL: val_compare_and_swap:
-; CHECK: orr [[NEWVAL_REG:w[0-9]+]], wzr, #0x4
-; CHECK: [[LABEL:.?LBB[0-9]+_[0-9]+]]:
-; CHECK: ldaxr [[RESULT:w[0-9]+]], [x0]
-; CHECK: cmp [[RESULT]], #7
-; CHECK: b.ne [[LABEL2:.?LBB[0-9]+_[0-9]+]]
-; CHECK: stxr [[SCRATCH_REG:w[0-9]+]], [[NEWVAL_REG]], [x0]
-; CHECK: cbnz [[SCRATCH_REG]], [[LABEL]]
-; CHECK: [[LABEL2]]:
-  %pair = cmpxchg i32* %p, i32 7, i32 4 acquire acquire
+; CHECK-NEXT: [[LABEL:.?LBB[0-9]+_[0-9]+]]:
+; CHECK-NEXT: ldaxr [[RESULT:w[0-9]+]], [x0]
+; CHECK-NEXT: cmp [[RESULT]], w1
+; CHECK-NEXT: b.ne [[LABEL2:.?LBB[0-9]+_[0-9]+]]
+; CHECK-NEXT: stxr [[SCRATCH_REG:w[0-9]+]], w2, [x0]
+; CHECK-NEXT: cbnz [[SCRATCH_REG]], [[LABEL]]
+; CHECK-NEXT: [[LABEL2]]:
+  %pair = cmpxchg i32* %p, i32 %cmp, i32 %new acquire acquire
   %val = extractvalue { i32, i1 } %pair, 0
   ret i32 %val
 }

-define i64 @val_compare_and_swap_64(i64* %p) {
+define i32 @val_compare_and_swap_rel(i32* %p, i32 %cmp, i32 %new) #0 {
+; CHECK-LABEL: val_compare_and_swap_rel:
+; CHECK-NEXT: [[LABEL:.?LBB[0-9]+_[0-9]+]]:
+; CHECK-NEXT: ldaxr [[RESULT:w[0-9]+]], [x0]
+; CHECK-NEXT: cmp [[RESULT]], w1
+; CHECK-NEXT: b.ne [[LABEL2:.?LBB[0-9]+_[0-9]+]]
+; CHECK-NEXT: stlxr [[SCRATCH_REG:w[0-9]+]], w2, [x0]
+; CHECK-NEXT: cbnz [[SCRATCH_REG]], [[LABEL]]
+; CHECK-NEXT: [[LABEL2]]:
+  %pair = cmpxchg i32* %p, i32 %cmp, i32 %new acq_rel monotonic
+  %val = extractvalue { i32, i1 } %pair, 0
+  ret i32 %val
+}
+
+define i64 @val_compare_and_swap_64(i64* %p, i64 %cmp, i64 %new) #0 {
 ; CHECK-LABEL: val_compare_and_swap_64:
-; CHECK: orr w[[NEWVAL_REG:[0-9]+]], wzr, #0x4
-; CHECK: [[LABEL:.?LBB[0-9]+_[0-9]+]]:
-; CHECK: ldxr [[RESULT:x[0-9]+]], [x0]
-; CHECK: cmp [[RESULT]], #7
-; CHECK: b.ne [[LABEL2:.?LBB[0-9]+_[0-9]+]]
-; CHECK-NOT: stxr x[[NEWVAL_REG]], x[[NEWVAL_REG]]
-; CHECK: stxr [[SCRATCH_REG:w[0-9]+]], x[[NEWVAL_REG]], [x0]
-; CHECK: cbnz [[SCRATCH_REG]], [[LABEL]]
-; CHECK: [[LABEL2]]:
-  %pair = cmpxchg i64* %p, i64 7, i64 4 monotonic monotonic
+; CHECK-NEXT: mov x[[ADDR:[0-9]+]], x0
+; CHECK-NEXT: [[LABEL:.?LBB[0-9]+_[0-9]+]]:
+; CHECK-NEXT: ldxr [[RESULT:x[0-9]+]], [x[[ADDR]]]
+; CHECK-NEXT: cmp [[RESULT]], x1
+; CHECK-NEXT: b.ne [[LABEL2:.?LBB[0-9]+_[0-9]+]]
+; CHECK-NEXT: stxr [[SCRATCH_REG:w[0-9]+]], x2, [x[[ADDR]]]
+; CHECK-NEXT: cbnz [[SCRATCH_REG]], [[LABEL]]
+; CHECK-NEXT: [[LABEL2]]:
+  %pair = cmpxchg i64* %p, i64 %cmp, i64 %new monotonic monotonic
   %val = extractvalue { i64, i1 } %pair, 0
   ret i64 %val
 }

-define i32 @fetch_and_nand(i32* %p) {
+define i32 @fetch_and_nand(i32* %p) #0 {
 ; CHECK-LABEL: fetch_and_nand:
 ; CHECK: [[LABEL:.?LBB[0-9]+_[0-9]+]]:
 ; CHECK: ldxr w[[DEST_REG:[0-9]+]], [x0]
@@ -45,7 +57,7 @@ define i32 @fetch_and_nand(i32* %p) {
   ret i32 %val
 }

-define i64 @fetch_and_nand_64(i64* %p) {
+define i64 @fetch_and_nand_64(i64* %p) #0 {
 ; CHECK-LABEL: fetch_and_nand_64:
 ; CHECK: mov x[[ADDR:[0-9]+]], x0
 ; CHECK: [[LABEL:.?LBB[0-9]+_[0-9]+]]:
@@ -59,7 +71,7 @@ define i64 @fetch_and_nand_64(i64* %p) {
   ret i64 %val
 }

-define i32 @fetch_and_or(i32* %p) {
+define i32 @fetch_and_or(i32* %p) #0 {
 ; CHECK-LABEL: fetch_and_or:
 ; CHECK: movz [[OLDVAL_REG:w[0-9]+]], #0x5
 ; CHECK: [[LABEL:.?LBB[0-9]+_[0-9]+]]:
@@ -73,7 +85,7 @@ define i32 @fetch_and_or(i32* %p) {
   ret i32 %val
 }

-define i64 @fetch_and_or_64(i64* %p) {
+define i64 @fetch_and_or_64(i64* %p) #0 {
 ; CHECK: fetch_and_or_64:
 ; CHECK: mov x[[ADDR:[0-9]+]], x0
 ; CHECK: [[LABEL:.?LBB[0-9]+_[0-9]+]]:
@@ -85,52 +97,52 @@ define i64 @fetch_and_or_64(i64* %p) {
   ret i64 %val
 }

-define void @acquire_fence() {
+define void @acquire_fence() #0 {
   fence acquire
   ret void
   ; CHECK-LABEL: acquire_fence:
   ; CHECK: dmb ishld
 }

-define void @release_fence() {
+define void @release_fence() #0 {
   fence release
   ret void
   ; CHECK-LABEL: release_fence:
   ; CHECK: dmb ish{{$}}
 }

-define void @seq_cst_fence() {
+define void @seq_cst_fence() #0 {
   fence seq_cst
   ret void
   ; CHECK-LABEL: seq_cst_fence:
   ; CHECK: dmb ish{{$}}
 }

-define i32 @atomic_load(i32* %p) {
-  %r = load atomic i32* %p seq_cst, align 4
+define i32 @atomic_load(i32* %p) #0 {
+  %r = load atomic i32, i32* %p seq_cst, align 4
   ret i32 %r
   ; CHECK-LABEL: atomic_load:
   ; CHECK: ldar
 }

-define i8 @atomic_load_relaxed_8(i8* %p, i32 %off32) {
+define i8 @atomic_load_relaxed_8(i8* %p, i32 %off32) #0 {
 ; CHECK-LABEL: atomic_load_relaxed_8:
-  %ptr_unsigned = getelementptr i8* %p, i32 4095
-  %val_unsigned = load atomic i8* %ptr_unsigned monotonic, align 1
+  %ptr_unsigned = getelementptr i8, i8* %p, i32 4095
+  %val_unsigned = load atomic i8, i8* %ptr_unsigned monotonic, align 1
 ; CHECK: ldrb {{w[0-9]+}}, [x0, #4095]

-  %ptr_regoff = getelementptr i8* %p, i32 %off32
-  %val_regoff = load atomic i8* %ptr_regoff unordered, align 1
+  %ptr_regoff = getelementptr i8, i8* %p, i32 %off32
+  %val_regoff = load atomic i8, i8* %ptr_regoff unordered, align 1
   %tot1 = add i8 %val_unsigned, %val_regoff
 ; CHECK: ldrb {{w[0-9]+}}, [x0, w1, sxtw]

-  %ptr_unscaled = getelementptr i8* %p, i32 -256
-  %val_unscaled = load atomic i8* %ptr_unscaled monotonic, align 1
+  %ptr_unscaled = getelementptr i8, i8* %p, i32 -256
+  %val_unscaled = load atomic i8, i8* %ptr_unscaled monotonic, align 1
   %tot2 = add i8 %tot1, %val_unscaled
 ; CHECK: ldurb {{w[0-9]+}}, [x0, #-256]

-  %ptr_random = getelementptr i8* %p, i32 1191936 ; 0x123000 (i.e. ADD imm)
-  %val_random = load atomic i8* %ptr_random unordered, align 1
+  %ptr_random = getelementptr i8, i8* %p, i32 1191936 ; 0x123000 (i.e. ADD imm)
+  %val_random = load atomic i8, i8* %ptr_random unordered, align 1
   %tot3 = add i8 %tot2, %val_random
 ; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
 ; CHECK: ldrb {{w[0-9]+}}, [x[[ADDR]]]
@@ -138,24 +150,24 @@ define i8 @atomic_load_relaxed_8(i8* %p, i32 %off32) {
   ret i8 %tot3
 }

-define i16 @atomic_load_relaxed_16(i16* %p, i32 %off32) {
+define i16 @atomic_load_relaxed_16(i16* %p, i32 %off32) #0 {
 ; CHECK-LABEL: atomic_load_relaxed_16:
-  %ptr_unsigned = getelementptr i16* %p, i32 4095
-  %val_unsigned = load atomic i16* %ptr_unsigned monotonic, align 2
+  %ptr_unsigned = getelementptr i16, i16* %p, i32 4095
+  %val_unsigned = load atomic i16, i16* %ptr_unsigned monotonic, align 2
 ; CHECK: ldrh {{w[0-9]+}}, [x0, #8190]

-  %ptr_regoff = getelementptr i16* %p, i32 %off32
-  %val_regoff = load atomic i16* %ptr_regoff unordered, align 2
+  %ptr_regoff = getelementptr i16, i16* %p, i32 %off32
+  %val_regoff = load atomic i16, i16* %ptr_regoff unordered, align 2
   %tot1 = add i16 %val_unsigned, %val_regoff
 ; CHECK: ldrh {{w[0-9]+}}, [x0, w1, sxtw #1]

-  %ptr_unscaled = getelementptr i16* %p, i32 -128
-  %val_unscaled = load atomic i16* %ptr_unscaled monotonic, align 2
+  %ptr_unscaled = getelementptr i16, i16* %p, i32 -128
+  %val_unscaled = load atomic i16, i16* %ptr_unscaled monotonic, align 2
   %tot2 = add i16 %tot1, %val_unscaled
 ; CHECK: ldurh {{w[0-9]+}}, [x0, #-256]

-  %ptr_random = getelementptr i16* %p, i32 595968 ; 0x123000/2 (i.e. ADD imm)
-  %val_random = load atomic i16* %ptr_random unordered, align 2
+  %ptr_random = getelementptr i16, i16* %p, i32 595968 ; 0x123000/2 (i.e. ADD imm)
+  %val_random = load atomic i16, i16* %ptr_random unordered, align 2
   %tot3 = add i16 %tot2, %val_random
 ; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
 ; CHECK: ldrh {{w[0-9]+}}, [x[[ADDR]]]
@@ -163,24 +175,24 @@ define i16 @atomic_load_relaxed_16(i16* %p, i32 %off32) {
   ret i16 %tot3
 }

-define i32 @atomic_load_relaxed_32(i32* %p, i32 %off32) {
+define i32 @atomic_load_relaxed_32(i32* %p, i32 %off32) #0 {
 ; CHECK-LABEL: atomic_load_relaxed_32:
-  %ptr_unsigned = getelementptr i32* %p, i32 4095
-  %val_unsigned = load atomic i32* %ptr_unsigned monotonic, align 4
+  %ptr_unsigned = getelementptr i32, i32* %p, i32 4095
+  %val_unsigned = load atomic i32, i32* %ptr_unsigned monotonic, align 4
 ; CHECK: ldr {{w[0-9]+}}, [x0, #16380]

-  %ptr_regoff = getelementptr i32* %p, i32 %off32
-  %val_regoff = load atomic i32* %ptr_regoff unordered, align 4
+  %ptr_regoff = getelementptr i32, i32* %p, i32 %off32
+  %val_regoff = load atomic i32, i32* %ptr_regoff unordered, align 4
   %tot1 = add i32 %val_unsigned, %val_regoff
 ; CHECK: ldr {{w[0-9]+}}, [x0, w1, sxtw #2]

-  %ptr_unscaled = getelementptr i32* %p, i32 -64
-  %val_unscaled = load atomic i32* %ptr_unscaled monotonic, align 4
+  %ptr_unscaled = getelementptr i32, i32* %p, i32 -64
+  %val_unscaled = load atomic i32, i32* %ptr_unscaled monotonic, align 4
   %tot2 = add i32 %tot1, %val_unscaled
 ; CHECK: ldur {{w[0-9]+}}, [x0, #-256]

-  %ptr_random = getelementptr i32* %p, i32 297984 ; 0x123000/4 (i.e. ADD imm)
-  %val_random = load atomic i32* %ptr_random unordered, align 4
+  %ptr_random = getelementptr i32, i32* %p, i32 297984 ; 0x123000/4 (i.e. ADD imm)
+  %val_random = load atomic i32, i32* %ptr_random unordered, align 4
   %tot3 = add i32 %tot2, %val_random
 ; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
 ; CHECK: ldr {{w[0-9]+}}, [x[[ADDR]]]
@@ -188,24 +200,24 @@ define i32 @atomic_load_relaxed_32(i32* %p, i32 %off32) {
   ret i32 %tot3
 }

-define i64 @atomic_load_relaxed_64(i64* %p, i32 %off32) {
+define i64 @atomic_load_relaxed_64(i64* %p, i32 %off32) #0 {
 ; CHECK-LABEL: atomic_load_relaxed_64:
-  %ptr_unsigned = getelementptr i64* %p, i32 4095
-  %val_unsigned = load atomic i64* %ptr_unsigned monotonic, align 8
+  %ptr_unsigned = getelementptr i64, i64* %p, i32 4095
+  %val_unsigned = load atomic i64, i64* %ptr_unsigned monotonic, align 8
 ; CHECK: ldr {{x[0-9]+}}, [x0, #32760]

-  %ptr_regoff = getelementptr i64* %p, i32 %off32
-  %val_regoff = load atomic i64* %ptr_regoff unordered, align 8
+  %ptr_regoff = getelementptr i64, i64* %p, i32 %off32
+  %val_regoff = load atomic i64, i64* %ptr_regoff unordered, align 8
   %tot1 = add i64 %val_unsigned, %val_regoff
 ; CHECK: ldr {{x[0-9]+}}, [x0, w1, sxtw #3]

-  %ptr_unscaled = getelementptr i64* %p, i32 -32
-  %val_unscaled = load atomic i64* %ptr_unscaled monotonic, align 8
+  %ptr_unscaled = getelementptr i64, i64* %p, i32 -32
+  %val_unscaled = load atomic i64, i64* %ptr_unscaled monotonic, align 8
   %tot2 = add i64 %tot1, %val_unscaled
 ; CHECK: ldur {{x[0-9]+}}, [x0, #-256]

-  %ptr_random = getelementptr i64* %p, i32 148992 ; 0x123000/8 (i.e. ADD imm)
-  %val_random = load atomic i64* %ptr_random unordered, align 8
+  %ptr_random = getelementptr i64, i64* %p, i32 148992 ; 0x123000/8 (i.e. ADD imm)
+  %val_random = load atomic i64, i64* %ptr_random unordered, align 8
   %tot3 = add i64 %tot2, %val_random
 ; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
 ; CHECK: ldr {{x[0-9]+}}, [x[[ADDR]]]
@@ -214,28 +226,28 @@
 }


-define void @atomc_store(i32* %p) {
+define void @atomc_store(i32* %p) #0 {
   store atomic i32 4, i32* %p seq_cst, align 4
   ret void
   ; CHECK-LABEL: atomc_store:
   ; CHECK: stlr
 }

-define void @atomic_store_relaxed_8(i8* %p, i32 %off32, i8 %val) {
+define void @atomic_store_relaxed_8(i8* %p, i32 %off32, i8 %val) #0 {
 ; CHECK-LABEL: atomic_store_relaxed_8:
-  %ptr_unsigned = getelementptr i8* %p, i32 4095
+  %ptr_unsigned = getelementptr i8, i8* %p, i32 4095
   store atomic i8 %val, i8* %ptr_unsigned monotonic, align 1
 ; CHECK: strb {{w[0-9]+}}, [x0, #4095]

-  %ptr_regoff = getelementptr i8* %p, i32 %off32
+  %ptr_regoff = getelementptr i8, i8* %p, i32 %off32
   store atomic i8 %val, i8* %ptr_regoff unordered, align 1
 ; CHECK: strb {{w[0-9]+}}, [x0, w1, sxtw]

-  %ptr_unscaled = getelementptr i8* %p, i32 -256
+  %ptr_unscaled = getelementptr i8, i8* %p, i32 -256
   store atomic i8 %val, i8* %ptr_unscaled monotonic, align 1
 ; CHECK: sturb {{w[0-9]+}}, [x0, #-256]

-  %ptr_random = getelementptr i8* %p, i32 1191936 ; 0x123000 (i.e. ADD imm)
+  %ptr_random = getelementptr i8, i8* %p, i32 1191936 ; 0x123000 (i.e. ADD imm)
   store atomic i8 %val, i8* %ptr_random unordered, align 1
 ; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
 ; CHECK: strb {{w[0-9]+}}, [x[[ADDR]]]
@@ -243,21 +255,21 @@ define void @atomic_store_relaxed_8(i8* %p, i32 %off32, i8 %val) {
   ret void
 }

-define void @atomic_store_relaxed_16(i16* %p, i32 %off32, i16 %val) {
+define void @atomic_store_relaxed_16(i16* %p, i32 %off32, i16 %val) #0 {
 ; CHECK-LABEL: atomic_store_relaxed_16:
-  %ptr_unsigned = getelementptr i16* %p, i32 4095
+  %ptr_unsigned = getelementptr i16, i16* %p, i32 4095
   store atomic i16 %val, i16* %ptr_unsigned monotonic, align 2
 ; CHECK: strh {{w[0-9]+}}, [x0, #8190]

-  %ptr_regoff = getelementptr i16* %p, i32 %off32
+  %ptr_regoff = getelementptr i16, i16* %p, i32 %off32
   store atomic i16 %val, i16* %ptr_regoff unordered, align 2
 ; CHECK: strh {{w[0-9]+}}, [x0, w1, sxtw #1]

-  %ptr_unscaled = getelementptr i16* %p, i32 -128
+  %ptr_unscaled = getelementptr i16, i16* %p, i32 -128
   store atomic i16 %val, i16* %ptr_unscaled monotonic, align 2
 ; CHECK: sturh {{w[0-9]+}}, [x0, #-256]

-  %ptr_random = getelementptr i16* %p, i32 595968 ; 0x123000/2 (i.e. ADD imm)
+  %ptr_random = getelementptr i16, i16* %p, i32 595968 ; 0x123000/2 (i.e. ADD imm)
   store atomic i16 %val, i16* %ptr_random unordered, align 2
 ; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
 ; CHECK: strh {{w[0-9]+}}, [x[[ADDR]]]
@@ -265,21 +277,21 @@ define void @atomic_store_relaxed_16(i16* %p, i32 %off32, i16 %val) {
   ret void
 }

-define void @atomic_store_relaxed_32(i32* %p, i32 %off32, i32 %val) {
+define void @atomic_store_relaxed_32(i32* %p, i32 %off32, i32 %val) #0 {
 ; CHECK-LABEL: atomic_store_relaxed_32:
-  %ptr_unsigned = getelementptr i32* %p, i32 4095
+  %ptr_unsigned = getelementptr i32, i32* %p, i32 4095
   store atomic i32 %val, i32* %ptr_unsigned monotonic, align 4
 ; CHECK: str {{w[0-9]+}}, [x0, #16380]

-  %ptr_regoff = getelementptr i32* %p, i32 %off32
+  %ptr_regoff = getelementptr i32, i32* %p, i32 %off32
   store atomic i32 %val, i32* %ptr_regoff unordered, align 4
 ; CHECK: str {{w[0-9]+}}, [x0, w1, sxtw #2]

-  %ptr_unscaled = getelementptr i32* %p, i32 -64
+  %ptr_unscaled = getelementptr i32, i32* %p, i32 -64
   store atomic i32 %val, i32* %ptr_unscaled monotonic, align 4
 ; CHECK: stur {{w[0-9]+}}, [x0, #-256]

-  %ptr_random = getelementptr i32* %p, i32 297984 ; 0x123000/4 (i.e. ADD imm)
+  %ptr_random = getelementptr i32, i32* %p, i32 297984 ; 0x123000/4 (i.e. ADD imm)
   store atomic i32 %val, i32* %ptr_random unordered, align 4
 ; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
 ; CHECK: str {{w[0-9]+}}, [x[[ADDR]]]
@@ -287,21 +299,21 @@ define void @atomic_store_relaxed_32(i32* %p, i32 %off32, i32 %val) {
   ret void
 }

-define void @atomic_store_relaxed_64(i64* %p, i32 %off32, i64 %val) {
+define void @atomic_store_relaxed_64(i64* %p, i32 %off32, i64 %val) #0 {
 ; CHECK-LABEL: atomic_store_relaxed_64:
-  %ptr_unsigned = getelementptr i64* %p, i32 4095
+  %ptr_unsigned = getelementptr i64, i64* %p, i32 4095
   store atomic i64 %val, i64* %ptr_unsigned monotonic, align 8
 ; CHECK: str {{x[0-9]+}}, [x0, #32760]

-  %ptr_regoff = getelementptr i64* %p, i32 %off32
+  %ptr_regoff = getelementptr i64, i64* %p, i32 %off32
   store atomic i64 %val, i64* %ptr_regoff unordered, align 8
 ; CHECK: str {{x[0-9]+}}, [x0, w1, sxtw #3]

-  %ptr_unscaled = getelementptr i64* %p, i32 -32
+  %ptr_unscaled = getelementptr i64, i64* %p, i32 -32
   store atomic i64 %val, i64* %ptr_unscaled monotonic, align 8
 ; CHECK: stur {{x[0-9]+}}, [x0, #-256]

-  %ptr_random = getelementptr i64* %p, i32 148992 ; 0x123000/8 (i.e. ADD imm)
+  %ptr_random = getelementptr i64, i64* %p, i32 148992 ; 0x123000/8 (i.e. ADD imm)
   store atomic i64 %val, i64* %ptr_random unordered, align 8
 ; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
 ; CHECK: str {{x[0-9]+}}, [x[[ADDR]]]
@@ -319,13 +331,13 @@ define void @atomic_store_relaxed_64(i64* %p, i32 %off32, i64 %val) {

 define i32 @next_id() nounwind optsize ssp align 2 {
 entry:
-  %0 = atomicrmw add i32* getelementptr inbounds (%"class.X::Atomic"* @counter, i64 0, i32 0, i32 0), i32 1 seq_cst
+  %0 = atomicrmw add i32* getelementptr inbounds (%"class.X::Atomic", %"class.X::Atomic"* @counter, i64 0, i32 0, i32 0), i32 1 seq_cst
   %add.i = add i32 %0, 1
   %tobool = icmp eq i32 %add.i, 0
   br i1 %tobool, label %if.else, label %return

 if.else: ; preds = %entry
-  %1 = atomicrmw add i32* getelementptr inbounds (%"class.X::Atomic"* @counter, i64 0, i32 0, i32 0), i32 1 seq_cst
+  %1 = atomicrmw add i32* getelementptr inbounds (%"class.X::Atomic", %"class.X::Atomic"* @counter, i64 0, i32 0, i32 0), i32 1 seq_cst
   %add.i2 = add i32 %1, 1
   br label %return

@@ -333,3 +345,5 @@ return: ; preds = %if.else, %entry
   %retval.0 = phi i32 [ %add.i2, %if.else ], [ %add.i, %entry ]
   ret i32 %retval.0
 }
+
+attributes #0 = { nounwind }