aboutsummaryrefslogtreecommitdiff
path: root/test/CodeGen/PowerPC
diff options
context:
space:
mode:
Diffstat (limited to 'test/CodeGen/PowerPC')
-rw-r--r--test/CodeGen/PowerPC/2006-07-07-ComputeMaskedBits.ll36
-rw-r--r--test/CodeGen/PowerPC/2007-11-16-landingpad-split.ll1
-rw-r--r--test/CodeGen/PowerPC/BreakableToken-reduced.ll4
-rw-r--r--test/CodeGen/PowerPC/aantidep-def-ec.mir16
-rw-r--r--test/CodeGen/PowerPC/addegluecrash.ll58
-rw-r--r--test/CodeGen/PowerPC/addi-licm.ll8
-rw-r--r--test/CodeGen/PowerPC/anon_aggr.ll59
-rw-r--r--test/CodeGen/PowerPC/atomics-regression.ll9546
-rw-r--r--test/CodeGen/PowerPC/bitcasts-direct-move.ll4
-rw-r--r--test/CodeGen/PowerPC/branch_coalesce.ll31
-rw-r--r--test/CodeGen/PowerPC/complex-return.ll12
-rw-r--r--test/CodeGen/PowerPC/crbit-asm.ll7
-rw-r--r--test/CodeGen/PowerPC/crbits.ll9
-rw-r--r--test/CodeGen/PowerPC/ctrloop-i128.ll34
-rw-r--r--test/CodeGen/PowerPC/ctrloop-intrin.ll12
-rw-r--r--test/CodeGen/PowerPC/expand-contiguous-isel.ll151
-rw-r--r--test/CodeGen/PowerPC/expand-isel-1.mir57
-rw-r--r--test/CodeGen/PowerPC/expand-isel-2.mir57
-rw-r--r--test/CodeGen/PowerPC/expand-isel-3.mir58
-rw-r--r--test/CodeGen/PowerPC/expand-isel-4.mir59
-rw-r--r--test/CodeGen/PowerPC/expand-isel-5.mir54
-rw-r--r--test/CodeGen/PowerPC/expand-isel-6.mir57
-rw-r--r--test/CodeGen/PowerPC/expand-isel-7.mir58
-rw-r--r--test/CodeGen/PowerPC/expand-isel-8.mir65
-rw-r--r--test/CodeGen/PowerPC/expand-isel.ll227
-rw-r--r--test/CodeGen/PowerPC/fast-isel-load-store.ll2
-rw-r--r--test/CodeGen/PowerPC/fma-aggr-FMF.ll35
-rw-r--r--test/CodeGen/PowerPC/fold-zero.ll21
-rw-r--r--test/CodeGen/PowerPC/fp-int-conversions-direct-moves.ll8
-rw-r--r--test/CodeGen/PowerPC/fp128-bitcast-after-operation.ll32
-rw-r--r--test/CodeGen/PowerPC/i1-ext-fold.ll25
-rw-r--r--test/CodeGen/PowerPC/i1-to-double.ll22
-rw-r--r--test/CodeGen/PowerPC/i64_fp_round.ll11
-rw-r--r--test/CodeGen/PowerPC/ifcvt.ll11
-rw-r--r--test/CodeGen/PowerPC/indirectbr.ll36
-rw-r--r--test/CodeGen/PowerPC/isel.ll19
-rw-r--r--test/CodeGen/PowerPC/jaggedstructs.ll52
-rw-r--r--test/CodeGen/PowerPC/lsa.ll16
-rw-r--r--test/CodeGen/PowerPC/mature-mc-support.ll2
-rw-r--r--test/CodeGen/PowerPC/mcm-obj.ll5
-rw-r--r--test/CodeGen/PowerPC/misched-inorder-latency.ll4
-rw-r--r--test/CodeGen/PowerPC/optcmp.ll32
-rw-r--r--test/CodeGen/PowerPC/p8-isel-sched.ll13
-rw-r--r--test/CodeGen/PowerPC/p8-scalar_vector_conversions.ll601
-rw-r--r--test/CodeGen/PowerPC/ppc-crbits-onoff.ll13
-rw-r--r--test/CodeGen/PowerPC/ppc-shrink-wrapping.ll2
-rw-r--r--test/CodeGen/PowerPC/ppc64-align-long-double.ll41
-rw-r--r--test/CodeGen/PowerPC/ppc64-gep-opt.ll4
-rw-r--r--test/CodeGen/PowerPC/ppc64le-aggregates.ll5
-rw-r--r--test/CodeGen/PowerPC/pr30451.ll20
-rw-r--r--test/CodeGen/PowerPC/pr32063.ll16
-rw-r--r--test/CodeGen/PowerPC/pr32140.ll59
-rw-r--r--test/CodeGen/PowerPC/pristine-and-livein.mir330
-rw-r--r--test/CodeGen/PowerPC/select-i1-vs-i1.ll186
-rw-r--r--test/CodeGen/PowerPC/select_const.ll789
-rw-r--r--test/CodeGen/PowerPC/setcc-logic.ll478
-rw-r--r--test/CodeGen/PowerPC/setcc-to-sub.ll73
-rw-r--r--test/CodeGen/PowerPC/sjlj_no0x.ll29
-rw-r--r--test/CodeGen/PowerPC/srl-mask.ll11
-rw-r--r--test/CodeGen/PowerPC/stacksize.ll86
-rw-r--r--test/CodeGen/PowerPC/structsinmem.ll28
-rw-r--r--test/CodeGen/PowerPC/structsinregs.ll60
-rw-r--r--test/CodeGen/PowerPC/subreg-postra-2.ll7
-rw-r--r--test/CodeGen/PowerPC/subreg-postra.ll6
-rw-r--r--test/CodeGen/PowerPC/subtract_from_imm.ll41
-rw-r--r--test/CodeGen/PowerPC/swaps-le-4.ll8
-rw-r--r--test/CodeGen/PowerPC/swaps-le-7.ll4
-rw-r--r--test/CodeGen/PowerPC/tail-dup-branch-to-fallthrough.ll6
-rw-r--r--test/CodeGen/PowerPC/tail-dup-break-cfg.ll140
-rw-r--r--test/CodeGen/PowerPC/tail-dup-layout.ll494
-rw-r--r--test/CodeGen/PowerPC/toc-load-sched-bug.ll28
-rw-r--r--test/CodeGen/PowerPC/vec_absd.ll4
-rw-r--r--test/CodeGen/PowerPC/vec_cmp.ll40
-rw-r--r--test/CodeGen/PowerPC/vsx-args.ll12
-rw-r--r--test/CodeGen/PowerPC/vsx-infl-copy1.ll18
-rw-r--r--test/CodeGen/PowerPC/vsx-p9.ll12
76 files changed, 13847 insertions, 830 deletions
diff --git a/test/CodeGen/PowerPC/2006-07-07-ComputeMaskedBits.ll b/test/CodeGen/PowerPC/2006-07-07-ComputeMaskedBits.ll
index 264967157d7a..56f4a4173ef5 100644
--- a/test/CodeGen/PowerPC/2006-07-07-ComputeMaskedBits.ll
+++ b/test/CodeGen/PowerPC/2006-07-07-ComputeMaskedBits.ll
@@ -1,17 +1,33 @@
-; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-apple-darwin | grep extsw | count 2
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64le-unknown-linux-gnu | FileCheck %s
@lens = external global i8* ; <i8**> [#uses=1]
@vals = external global i32* ; <i32**> [#uses=1]
define i32 @test(i32 %i) {
- %tmp = load i8*, i8** @lens ; <i8*> [#uses=1]
- %tmp1 = getelementptr i8, i8* %tmp, i32 %i ; <i8*> [#uses=1]
- %tmp.upgrd.1 = load i8, i8* %tmp1 ; <i8> [#uses=1]
- %tmp2 = zext i8 %tmp.upgrd.1 to i32 ; <i32> [#uses=1]
- %tmp3 = load i32*, i32** @vals ; <i32*> [#uses=1]
- %tmp5 = sub i32 1, %tmp2 ; <i32> [#uses=1]
- %tmp6 = getelementptr i32, i32* %tmp3, i32 %tmp5 ; <i32*> [#uses=1]
- %tmp7 = load i32, i32* %tmp6 ; <i32> [#uses=1]
- ret i32 %tmp7
+; CHECK-LABEL: test:
+; CHECK: # BB#0:
+; CHECK-NEXT: addis 4, 2, .LC0@toc@ha
+; CHECK-NEXT: extsw 3, 3
+; CHECK-NEXT: addis 5, 2, .LC1@toc@ha
+; CHECK-NEXT: ld 4, .LC0@toc@l(4)
+; CHECK-NEXT: ld 4, 0(4)
+; CHECK-NEXT: lbzx 3, 4, 3
+; CHECK-NEXT: ld 4, .LC1@toc@l(5)
+; CHECK-NEXT: subfic 3, 3, 1
+; CHECK-NEXT: extsw 3, 3
+; CHECK-NEXT: ld 4, 0(4)
+; CHECK-NEXT: sldi 3, 3, 2
+; CHECK-NEXT: lwzx 3, 4, 3
+; CHECK-NEXT: blr
+ %tmp = load i8*, i8** @lens ; <i8*> [#uses=1]
+ %tmp1 = getelementptr i8, i8* %tmp, i32 %i ; <i8*> [#uses=1]
+ %tmp.upgrd.1 = load i8, i8* %tmp1 ; <i8> [#uses=1]
+ %tmp2 = zext i8 %tmp.upgrd.1 to i32 ; <i32> [#uses=1]
+ %tmp3 = load i32*, i32** @vals ; <i32*> [#uses=1]
+ %tmp5 = sub i32 1, %tmp2 ; <i32> [#uses=1]
+ %tmp6 = getelementptr i32, i32* %tmp3, i32 %tmp5 ; <i32*> [#uses=1]
+ %tmp7 = load i32, i32* %tmp6 ; <i32> [#uses=1]
+ ret i32 %tmp7
}
diff --git a/test/CodeGen/PowerPC/2007-11-16-landingpad-split.ll b/test/CodeGen/PowerPC/2007-11-16-landingpad-split.ll
index bd496704890f..53bad4fe06ee 100644
--- a/test/CodeGen/PowerPC/2007-11-16-landingpad-split.ll
+++ b/test/CodeGen/PowerPC/2007-11-16-landingpad-split.ll
@@ -1,5 +1,4 @@
; RUN: llc -mcpu=g5 < %s | FileCheck %s
-; RUN: llc -mcpu=g5 -addr-sink-using-gep=1 < %s | FileCheck %s
;; Formerly crashed, see PR 1508
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128"
target triple = "powerpc64-apple-darwin8"
diff --git a/test/CodeGen/PowerPC/BreakableToken-reduced.ll b/test/CodeGen/PowerPC/BreakableToken-reduced.ll
index 39516537da42..dcc093041682 100644
--- a/test/CodeGen/PowerPC/BreakableToken-reduced.ll
+++ b/test/CodeGen/PowerPC/BreakableToken-reduced.ll
@@ -265,12 +265,12 @@ _ZNK4llvm9StringRef10startswithES0_.exit: ; preds = %entry._ZNK4llvm9Str
}
; Function Attrs: nounwind argmemonly
-declare void @llvm.lifetime.start(i64, i8* nocapture) #2
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #2
declare void @_ZN5clang6format17WhitespaceManager24replaceWhitespaceInTokenERKNS0_11FormatTokenEjjN4llvm9StringRefES6_bjji(%"class.clang::format::WhitespaceManager"*, %"struct.clang::format::FormatToken"* dereferenceable(272), i32 zeroext, i32 zeroext, [2 x i64], [2 x i64], i1 zeroext, i32 zeroext, i32 zeroext, i32 signext) #3
; Function Attrs: nounwind argmemonly
-declare void @llvm.lifetime.end(i64, i8* nocapture) #2
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #2
attributes #9 = { nounwind }
diff --git a/test/CodeGen/PowerPC/aantidep-def-ec.mir b/test/CodeGen/PowerPC/aantidep-def-ec.mir
index cf6ab35d8db7..09aac7b0240a 100644
--- a/test/CodeGen/PowerPC/aantidep-def-ec.mir
+++ b/test/CodeGen/PowerPC/aantidep-def-ec.mir
@@ -48,22 +48,6 @@ tracksRegLiveness: true
liveins:
- { reg: '%x3' }
- { reg: '%x4' }
-calleeSavedRegisters: [ '%cr2', '%cr3', '%cr4', '%f14', '%f15', '%f16',
- '%f17', '%f18', '%f19', '%f20', '%f21', '%f22',
- '%f23', '%f24', '%f25', '%f26', '%f27', '%f28',
- '%f29', '%f30', '%f31', '%r14', '%r15', '%r16',
- '%r17', '%r18', '%r19', '%r20', '%r21', '%r22',
- '%r23', '%r24', '%r25', '%r26', '%r27', '%r28',
- '%r29', '%r30', '%r31', '%v20', '%v21', '%v22',
- '%v23', '%v24', '%v25', '%v26', '%v27', '%v28',
- '%v29', '%v30', '%v31', '%vf20', '%vf21', '%vf22',
- '%vf23', '%vf24', '%vf25', '%vf26', '%vf27', '%vf28',
- '%vf29', '%vf30', '%vf31', '%x14', '%x15', '%x16',
- '%x17', '%x18', '%x19', '%x20', '%x21', '%x22',
- '%x23', '%x24', '%x25', '%x26', '%x27', '%x28',
- '%x29', '%x30', '%x31', '%cr2eq', '%cr3eq', '%cr4eq',
- '%cr2gt', '%cr3gt', '%cr4gt', '%cr2lt', '%cr3lt',
- '%cr4lt', '%cr2un', '%cr3un', '%cr4un' ]
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
diff --git a/test/CodeGen/PowerPC/addegluecrash.ll b/test/CodeGen/PowerPC/addegluecrash.ll
new file mode 100644
index 000000000000..7605340d305f
--- /dev/null
+++ b/test/CodeGen/PowerPC/addegluecrash.ll
@@ -0,0 +1,58 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -O0 < %s | FileCheck %s
+target datalayout = "e-m:e-i64:64-n32:64"
+target triple = "powerpc64le-unknown-linux-gnu"
+
+define void @bn_mul_comba8(i64* nocapture %r, i64* nocapture readonly %a, i64* nocapture readonly %b) {
+; CHECK-LABEL: bn_mul_comba8:
+; CHECK: # BB#0:
+; CHECK-NEXT: ld 6, 0(4)
+; CHECK-NEXT: ld 7, 0(5)
+; CHECK-NEXT: mulhdu 8, 7, 6
+; CHECK-NEXT: ld 4, 8(4)
+; CHECK-NEXT: mulld 9, 4, 6
+; CHECK-NEXT: mulhdu 4, 4, 6
+; CHECK-NEXT: addc 6, 9, 8
+; CHECK-NEXT: addze 4, 4
+; CHECK-NEXT: ld 5, 8(5)
+; CHECK-NEXT: mulld 8, 5, 7
+; CHECK-NEXT: mulhdu 5, 5, 7
+; CHECK-NEXT: addc 6, 6, 8
+; CHECK-NEXT: addze 5, 5
+; CHECK-NEXT: add 4, 5, 4
+; CHECK-NEXT: cmpld 7, 4, 5
+; CHECK-NEXT: mfocrf 10, 1
+; CHECK-NEXT: rlwinm 10, 10, 29, 31, 31
+; CHECK-NEXT: # implicit-def: %X4
+; CHECK-NEXT: mr 4, 10
+; CHECK-NEXT: clrldi 4, 4, 32
+; CHECK-NEXT: std 4, 0(3)
+; CHECK-NEXT: blr
+ %1 = load i64, i64* %a, align 8
+ %conv = zext i64 %1 to i128
+ %2 = load i64, i64* %b, align 8
+ %conv2 = zext i64 %2 to i128
+ %mul = mul nuw i128 %conv2, %conv
+ %shr = lshr i128 %mul, 64
+ %agep = getelementptr inbounds i64, i64* %a, i64 1
+ %3 = load i64, i64* %agep, align 8
+ %conv14 = zext i64 %3 to i128
+ %mul15 = mul nuw i128 %conv14, %conv
+ %add17 = add i128 %mul15, %shr
+ %shr19 = lshr i128 %add17, 64
+ %conv20 = trunc i128 %shr19 to i64
+ %bgep = getelementptr inbounds i64, i64* %b, i64 1
+ %4 = load i64, i64* %bgep, align 8
+ %conv28 = zext i64 %4 to i128
+ %mul31 = mul nuw i128 %conv28, %conv2
+ %conv32 = and i128 %add17, 18446744073709551615
+ %add33 = add i128 %conv32, %mul31
+ %shr35 = lshr i128 %add33, 64
+ %conv36 = trunc i128 %shr35 to i64
+ %add37 = add i64 %conv36, %conv20
+ %cmp38 = icmp ult i64 %add37, %conv36
+ %conv148 = zext i1 %cmp38 to i64
+ store i64 %conv148, i64* %r, align 8
+ ret void
+}
+
diff --git a/test/CodeGen/PowerPC/addi-licm.ll b/test/CodeGen/PowerPC/addi-licm.ll
index 37a14899debc..d0178a8aec0e 100644
--- a/test/CodeGen/PowerPC/addi-licm.ll
+++ b/test/CodeGen/PowerPC/addi-licm.ll
@@ -9,9 +9,9 @@ entry:
%x = alloca [2048 x float], align 4
%y = alloca [2048 x float], align 4
%0 = bitcast [2048 x float]* %x to i8*
- call void @llvm.lifetime.start(i64 8192, i8* %0) #2
+ call void @llvm.lifetime.start.p0i8(i64 8192, i8* %0) #2
%1 = bitcast [2048 x float]* %y to i8*
- call void @llvm.lifetime.start(i64 8192, i8* %1) #2
+ call void @llvm.lifetime.start.p0i8(i64 8192, i8* %1) #2
br label %for.body.i
; CHECK-LABEL: @foo
@@ -50,12 +50,12 @@ loop.exit: ; preds = %for.body.i
}
; Function Attrs: nounwind
-declare void @llvm.lifetime.start(i64, i8* nocapture) #2
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #2
declare void @bar(float*, float*)
; Function Attrs: nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture) #2
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #2
attributes #0 = { nounwind readonly }
attributes #1 = { nounwind }
diff --git a/test/CodeGen/PowerPC/anon_aggr.ll b/test/CodeGen/PowerPC/anon_aggr.ll
index f4e788849ec8..9b32a8f55f34 100644
--- a/test/CodeGen/PowerPC/anon_aggr.ll
+++ b/test/CodeGen/PowerPC/anon_aggr.ll
@@ -60,33 +60,34 @@ equal:
unequal:
ret i8* %array2_ptr
}
-
; CHECK-LABEL: func2:
-; CHECK: ld [[REG2:[0-9]+]], 72(1)
-; CHECK: cmpld {{([0-9]+,)?}}4, [[REG2]]
-; CHECK-DAG: std [[REG2]], -[[OFFSET1:[0-9]+]]
+; CHECK: cmpld {{([0-9]+,)?}}4, 6
+; CHECK-DAG: std 6, 72(1)
+; CHECK-DAG: std 5, 64(1)
+; CHECK-DAG: std 6, -[[OFFSET1:[0-9]+]]
; CHECK-DAG: std 4, -[[OFFSET2:[0-9]+]]
; CHECK: ld 3, -[[OFFSET2]](1)
; CHECK: ld 3, -[[OFFSET1]](1)
-; DARWIN32: _func2:
-; DARWIN32: addi r[[REG1:[0-9]+]], r[[REGSP:[0-9]+]], 36
-; DARWIN32: lwz r[[REG2:[0-9]+]], 44(r[[REGSP]])
+; DARWIN32-LABEL: _func2
+; DARWIN32-DAG: addi r[[REG8:[0-9]+]], r[[REGSP:[0-9]+]], 36
+; DARWIN32-DAG: lwz r[[REG2:[0-9]+]], 44(r[[REGSP]])
; DARWIN32: mr
-; DARWIN32: mr r[[REG3:[0-9]+]], r[[REGA:[0-9]+]]
-; DARWIN32: cmplw {{(cr[0-9]+,)?}}r[[REGA]], r[[REG2]]
-; DARWIN32: stw r[[REG3]], -[[OFFSET1:[0-9]+]]
-; DARWIN32: stw r[[REG2]], -[[OFFSET2:[0-9]+]]
-; DARWIN32: lwz r3, -[[OFFSET1]]
-; DARWIN32: lwz r3, -[[OFFSET2]]
+; DARWIN32: mr r[[REG7:[0-9]+]], r5
+; DARWIN32-DAG: cmplw {{(cr[0-9]+,)?}}r5, r[[REG2]]
+; DARWIN32-DAG: stw r[[REG7]], -[[OFFSET1:[0-9]+]]
+; DARWIN32-DAG: stw r[[REG2]], -[[OFFSET2:[0-9]+]]
+; DARWIN32-DAG: lwz r3, -[[OFFSET1]]
+; DARWIN32-DAG: lwz r3, -[[OFFSET2]]
+
; DARWIN64: _func2:
; DARWIN64: ld r[[REG2:[0-9]+]], 72(r1)
; DARWIN64: mr
; DARWIN64: mr r[[REG3:[0-9]+]], r[[REGA:[0-9]+]]
; DARWIN64: cmpld {{(cr[0-9]+,)?}}r[[REGA]], r[[REG2]]
-; DARWIN64: std r[[REG3]], -[[OFFSET1:[0-9]+]]
; DARWIN64: std r[[REG2]], -[[OFFSET2:[0-9]+]]
+; DARWIN64: std r[[REG3]], -[[OFFSET1:[0-9]+]]
; DARWIN64: ld r3, -[[OFFSET1]]
; DARWIN64: ld r3, -[[OFFSET2]]
@@ -106,24 +107,24 @@ unequal:
}
; CHECK-LABEL: func3:
-; CHECK: ld [[REG3:[0-9]+]], 72(1)
-; CHECK: ld [[REG4:[0-9]+]], 56(1)
-; CHECK: cmpld {{([0-9]+,)?}}[[REG4]], [[REG3]]
-; CHECK: std [[REG3]], -[[OFFSET1:[0-9]+]](1)
-; CHECK: std [[REG4]], -[[OFFSET2:[0-9]+]](1)
+; CHECK: cmpld {{([0-9]+,)?}}4, 6
+; CHECK-DAG: std 4, -[[OFFSET2:[0-9]+]](1)
+; CHECK-DAG: std 6, -[[OFFSET1:[0-9]+]](1)
; CHECK: ld 3, -[[OFFSET2]](1)
; CHECK: ld 3, -[[OFFSET1]](1)
-; DARWIN32: _func3:
-; DARWIN32: addi r[[REG1:[0-9]+]], r[[REGSP:[0-9]+]], 36
-; DARWIN32: addi r[[REG2:[0-9]+]], r[[REGSP]], 24
-; DARWIN32: lwz r[[REG3:[0-9]+]], 44(r[[REGSP]])
-; DARWIN32: lwz r[[REG4:[0-9]+]], 32(r[[REGSP]])
-; DARWIN32: cmplw {{(cr[0-9]+,)?}}r[[REG4]], r[[REG3]]
-; DARWIN32: stw r[[REG3]], -[[OFFSET1:[0-9]+]]
-; DARWIN32: stw r[[REG4]], -[[OFFSET2:[0-9]+]]
-; DARWIN32: lwz r3, -[[OFFSET2]]
-; DARWIN32: lwz r3, -[[OFFSET1]]
+; DARWIN32-LABEL: _func3:
+; DARWIN32-DAG: stw r[[REG8:[0-9]+]], 44(r[[REGSP:[0-9]+]])
+; DARWIN32-DAG: stw r[[REG5:[0-9]+]], 32(r[[REGSP]])
+; DARWIN32-DAG: addi r[[REG5a:[0-9]+]], r[[REGSP:[0-9]+]], 36
+; DARWIN32-DAG: addi r[[REG8a:[0-9]+]], r[[REGSP]], 24
+; DARWIN32-DAG: lwz r[[REG5a:[0-9]+]], 44(r[[REGSP]])
+; DARWIN32-DAG: lwz r[[REG8a:[0-9]+]], 32(r[[REGSP]])
+; DARWIN32-DAG: cmplw {{(cr[0-9]+,)?}}r[[REG8a]], r[[REG5a]]
+; DARWIN32-DAG: stw r[[REG5a]], -[[OFFSET1:[0-9]+]]
+; DARWIN32-DAG: stw r[[REG8a]], -[[OFFSET2:[0-9]+]]
+; DARWIN32-DAG: lwz r3, -[[OFFSET1:[0-9]+]]
+; DARWIN32-DAG: lwz r3, -[[OFFSET2:[0-9]+]]
; DARWIN64: _func3:
; DARWIN64: ld r[[REG3:[0-9]+]], 72(r1)
diff --git a/test/CodeGen/PowerPC/atomics-regression.ll b/test/CodeGen/PowerPC/atomics-regression.ll
new file mode 100644
index 000000000000..9af82b625532
--- /dev/null
+++ b/test/CodeGen/PowerPC/atomics-regression.ll
@@ -0,0 +1,9546 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=powerpc64le-linux-gnu < %s | FileCheck %s -check-prefix=PPC64LE
+
+define i8 @test0(i8* %ptr) {
+; PPC64LE-LABEL: test0:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lbz 3, 0(3)
+; PPC64LE-NEXT: blr
+ %val = load atomic i8, i8* %ptr unordered, align 1
+ ret i8 %val
+}
+
+define i8 @test1(i8* %ptr) {
+; PPC64LE-LABEL: test1:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lbz 3, 0(3)
+; PPC64LE-NEXT: blr
+ %val = load atomic i8, i8* %ptr monotonic, align 1
+ ret i8 %val
+}
+
+define i8 @test2(i8* %ptr) {
+; PPC64LE-LABEL: test2:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lbz 3, 0(3)
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %val = load atomic i8, i8* %ptr acquire, align 1
+ ret i8 %val
+}
+
+define i8 @test3(i8* %ptr) {
+; PPC64LE-LABEL: test3:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: ori 2, 2, 0
+; PPC64LE-NEXT: lbz 3, 0(3)
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %val = load atomic i8, i8* %ptr seq_cst, align 1
+ ret i8 %val
+}
+
+define i16 @test4(i16* %ptr) {
+; PPC64LE-LABEL: test4:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lhz 3, 0(3)
+; PPC64LE-NEXT: blr
+ %val = load atomic i16, i16* %ptr unordered, align 2
+ ret i16 %val
+}
+
+define i16 @test5(i16* %ptr) {
+; PPC64LE-LABEL: test5:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lhz 3, 0(3)
+; PPC64LE-NEXT: blr
+ %val = load atomic i16, i16* %ptr monotonic, align 2
+ ret i16 %val
+}
+
+define i16 @test6(i16* %ptr) {
+; PPC64LE-LABEL: test6:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lhz 3, 0(3)
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %val = load atomic i16, i16* %ptr acquire, align 2
+ ret i16 %val
+}
+
+define i16 @test7(i16* %ptr) {
+; PPC64LE-LABEL: test7:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: ori 2, 2, 0
+; PPC64LE-NEXT: lhz 3, 0(3)
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %val = load atomic i16, i16* %ptr seq_cst, align 2
+ ret i16 %val
+}
+
+define i32 @test8(i32* %ptr) {
+; PPC64LE-LABEL: test8:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwz 3, 0(3)
+; PPC64LE-NEXT: blr
+ %val = load atomic i32, i32* %ptr unordered, align 4
+ ret i32 %val
+}
+
+define i32 @test9(i32* %ptr) {
+; PPC64LE-LABEL: test9:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwz 3, 0(3)
+; PPC64LE-NEXT: blr
+ %val = load atomic i32, i32* %ptr monotonic, align 4
+ ret i32 %val
+}
+
+define i32 @test10(i32* %ptr) {
+; PPC64LE-LABEL: test10:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwz 3, 0(3)
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %val = load atomic i32, i32* %ptr acquire, align 4
+ ret i32 %val
+}
+
+define i32 @test11(i32* %ptr) {
+; PPC64LE-LABEL: test11:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: ori 2, 2, 0
+; PPC64LE-NEXT: lwz 3, 0(3)
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %val = load atomic i32, i32* %ptr seq_cst, align 4
+ ret i32 %val
+}
+
+define i64 @test12(i64* %ptr) {
+; PPC64LE-LABEL: test12:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: ld 3, 0(3)
+; PPC64LE-NEXT: blr
+ %val = load atomic i64, i64* %ptr unordered, align 8
+ ret i64 %val
+}
+
+define i64 @test13(i64* %ptr) {
+; PPC64LE-LABEL: test13:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: ld 3, 0(3)
+; PPC64LE-NEXT: blr
+ %val = load atomic i64, i64* %ptr monotonic, align 8
+ ret i64 %val
+}
+
+define i64 @test14(i64* %ptr) {
+; PPC64LE-LABEL: test14:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: ld 3, 0(3)
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %val = load atomic i64, i64* %ptr acquire, align 8
+ ret i64 %val
+}
+
+define i64 @test15(i64* %ptr) {
+; PPC64LE-LABEL: test15:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: ori 2, 2, 0
+; PPC64LE-NEXT: ld 3, 0(3)
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %val = load atomic i64, i64* %ptr seq_cst, align 8
+ ret i64 %val
+}
+
+define void @test16(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test16:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: stb 4, 0(3)
+; PPC64LE-NEXT: blr
+ store atomic i8 %val, i8* %ptr unordered, align 1
+ ret void
+}
+
+define void @test17(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test17:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: stb 4, 0(3)
+; PPC64LE-NEXT: blr
+ store atomic i8 %val, i8* %ptr monotonic, align 1
+ ret void
+}
+
+define void @test18(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test18:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: stb 4, 0(3)
+; PPC64LE-NEXT: blr
+ store atomic i8 %val, i8* %ptr release, align 1
+ ret void
+}
+
+define void @test19(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test19:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: stb 4, 0(3)
+; PPC64LE-NEXT: blr
+ store atomic i8 %val, i8* %ptr seq_cst, align 1
+ ret void
+}
+
+define void @test20(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test20:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sth 4, 0(3)
+; PPC64LE-NEXT: blr
+ store atomic i16 %val, i16* %ptr unordered, align 2
+ ret void
+}
+
+define void @test21(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test21:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sth 4, 0(3)
+; PPC64LE-NEXT: blr
+ store atomic i16 %val, i16* %ptr monotonic, align 2
+ ret void
+}
+
+define void @test22(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test22:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: sth 4, 0(3)
+; PPC64LE-NEXT: blr
+ store atomic i16 %val, i16* %ptr release, align 2
+ ret void
+}
+
+define void @test23(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test23:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: sth 4, 0(3)
+; PPC64LE-NEXT: blr
+ store atomic i16 %val, i16* %ptr seq_cst, align 2
+ ret void
+}
+
+define void @test24(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test24:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: stw 4, 0(3)
+; PPC64LE-NEXT: blr
+ store atomic i32 %val, i32* %ptr unordered, align 4
+ ret void
+}
+
+define void @test25(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test25:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: stw 4, 0(3)
+; PPC64LE-NEXT: blr
+ store atomic i32 %val, i32* %ptr monotonic, align 4
+ ret void
+}
+
+define void @test26(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test26:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: stw 4, 0(3)
+; PPC64LE-NEXT: blr
+ store atomic i32 %val, i32* %ptr release, align 4
+ ret void
+}
+
+define void @test27(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test27:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: stw 4, 0(3)
+; PPC64LE-NEXT: blr
+ store atomic i32 %val, i32* %ptr seq_cst, align 4
+ ret void
+}
+
+define void @test28(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test28:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: std 4, 0(3)
+; PPC64LE-NEXT: blr
+ store atomic i64 %val, i64* %ptr unordered, align 8
+ ret void
+}
+
+define void @test29(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test29:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: std 4, 0(3)
+; PPC64LE-NEXT: blr
+ store atomic i64 %val, i64* %ptr monotonic, align 8
+ ret void
+}
+
+define void @test30(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test30:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: std 4, 0(3)
+; PPC64LE-NEXT: blr
+ store atomic i64 %val, i64* %ptr release, align 8
+ ret void
+}
+
+define void @test31(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test31:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: std 4, 0(3)
+; PPC64LE-NEXT: blr
+ store atomic i64 %val, i64* %ptr seq_cst, align 8
+ ret void
+}
+
+define void @test32() {
+; PPC64LE-LABEL: test32:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ fence acquire
+ ret void
+}
+
+define void @test33() {
+; PPC64LE-LABEL: test33:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ fence release
+ ret void
+}
+
+define void @test34() {
+; PPC64LE-LABEL: test34:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ fence acq_rel
+ ret void
+}
+
+define void @test35() {
+; PPC64LE-LABEL: test35:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: blr
+ fence seq_cst
+ ret void
+}
+
+define void @test36() {
+; PPC64LE-LABEL: test36:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ fence singlethread acquire
+ ret void
+}
+
+define void @test37() {
+; PPC64LE-LABEL: test37:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ fence singlethread release
+ ret void
+}
+
+define void @test38() {
+; PPC64LE-LABEL: test38:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ fence singlethread acq_rel
+ ret void
+}
+
+define void @test39() {
+; PPC64LE-LABEL: test39:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: blr
+ fence singlethread seq_cst
+ ret void
+}
+
+define void @test40(i8* %ptr, i8 %cmp, i8 %val) {
+; PPC64LE-LABEL: test40:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: b .LBB40_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB40_1:
+; PPC64LE-NEXT: stbcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB40_2
+; PPC64LE-NEXT: .LBB40_2:
+; PPC64LE-NEXT: lbarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: beq 0, .LBB40_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val monotonic monotonic
+ ret void
+}
+
+define void @test41(i8* %ptr, i8 %cmp, i8 %val) {
+; PPC64LE-LABEL: test41:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB41_1:
+; PPC64LE-NEXT: lbarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB41_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB41_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB41_4:
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acquire monotonic
+ ret void
+}
+
+define void @test42(i8* %ptr, i8 %cmp, i8 %val) {
+; PPC64LE-LABEL: test42:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB42_1:
+; PPC64LE-NEXT: lbarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB42_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB42_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB42_4:
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acquire acquire
+ ret void
+}
+
+define void @test43(i8* %ptr, i8 %cmp, i8 %val) {
+; PPC64LE-LABEL: test43:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: b .LBB43_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB43_1:
+; PPC64LE-NEXT: stbcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB43_2
+; PPC64LE-NEXT: .LBB43_2:
+; PPC64LE-NEXT: lbarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: beq 0, .LBB43_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val release monotonic
+ ret void
+}
+
+define void @test44(i8* %ptr, i8 %cmp, i8 %val) {
+; PPC64LE-LABEL: test44:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: b .LBB44_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB44_1:
+; PPC64LE-NEXT: stbcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB44_2
+; PPC64LE-NEXT: .LBB44_2:
+; PPC64LE-NEXT: lbarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: beq 0, .LBB44_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val release acquire
+ ret void
+}
+
+define void @test45(i8* %ptr, i8 %cmp, i8 %val) {
+; PPC64LE-LABEL: test45:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB45_1:
+; PPC64LE-NEXT: lbarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB45_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB45_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB45_4:
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acq_rel monotonic
+ ret void
+}
+
+define void @test46(i8* %ptr, i8 %cmp, i8 %val) {
+; PPC64LE-LABEL: test46:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB46_1:
+; PPC64LE-NEXT: lbarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB46_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB46_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB46_4:
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acq_rel acquire
+ ret void
+}
+
+define void @test47(i8* %ptr, i8 %cmp, i8 %val) {
+; PPC64LE-LABEL: test47:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB47_1:
+; PPC64LE-NEXT: lbarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB47_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB47_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB47_4:
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val seq_cst monotonic
+ ret void
+}
+
+define void @test48(i8* %ptr, i8 %cmp, i8 %val) {
+; PPC64LE-LABEL: test48:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB48_1:
+; PPC64LE-NEXT: lbarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB48_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB48_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB48_4:
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val seq_cst acquire
+ ret void
+}
+
+define void @test49(i8* %ptr, i8 %cmp, i8 %val) {
+; PPC64LE-LABEL: test49:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB49_1:
+; PPC64LE-NEXT: lbarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB49_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB49_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB49_4:
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val seq_cst seq_cst
+ ret void
+}
+
+define void @test50(i16* %ptr, i16 %cmp, i16 %val) {
+; PPC64LE-LABEL: test50:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: b .LBB50_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB50_1:
+; PPC64LE-NEXT: sthcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB50_2
+; PPC64LE-NEXT: .LBB50_2:
+; PPC64LE-NEXT: lharx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: beq 0, .LBB50_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val monotonic monotonic
+ ret void
+}
+
+define void @test51(i16* %ptr, i16 %cmp, i16 %val) {
+; PPC64LE-LABEL: test51:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB51_1:
+; PPC64LE-NEXT: lharx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB51_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB51_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB51_4:
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acquire monotonic
+ ret void
+}
+
+define void @test52(i16* %ptr, i16 %cmp, i16 %val) {
+; PPC64LE-LABEL: test52:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB52_1:
+; PPC64LE-NEXT: lharx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB52_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB52_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB52_4:
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acquire acquire
+ ret void
+}
+
+define void @test53(i16* %ptr, i16 %cmp, i16 %val) {
+; PPC64LE-LABEL: test53:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: b .LBB53_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB53_1:
+; PPC64LE-NEXT: sthcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB53_2
+; PPC64LE-NEXT: .LBB53_2:
+; PPC64LE-NEXT: lharx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: beq 0, .LBB53_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val release monotonic
+ ret void
+}
+
+define void @test54(i16* %ptr, i16 %cmp, i16 %val) {
+; PPC64LE-LABEL: test54:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: b .LBB54_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB54_1:
+; PPC64LE-NEXT: sthcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB54_2
+; PPC64LE-NEXT: .LBB54_2:
+; PPC64LE-NEXT: lharx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: beq 0, .LBB54_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val release acquire
+ ret void
+}
+
+define void @test55(i16* %ptr, i16 %cmp, i16 %val) {
+; PPC64LE-LABEL: test55:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB55_1:
+; PPC64LE-NEXT: lharx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB55_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB55_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB55_4:
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acq_rel monotonic
+ ret void
+}
+
+define void @test56(i16* %ptr, i16 %cmp, i16 %val) {
+; PPC64LE-LABEL: test56:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB56_1:
+; PPC64LE-NEXT: lharx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB56_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB56_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB56_4:
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acq_rel acquire
+ ret void
+}
+
+define void @test57(i16* %ptr, i16 %cmp, i16 %val) {
+; PPC64LE-LABEL: test57:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB57_1:
+; PPC64LE-NEXT: lharx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB57_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB57_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB57_4:
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val seq_cst monotonic
+ ret void
+}
+
+define void @test58(i16* %ptr, i16 %cmp, i16 %val) {
+; PPC64LE-LABEL: test58:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB58_1:
+; PPC64LE-NEXT: lharx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB58_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB58_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB58_4:
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val seq_cst acquire
+ ret void
+}
+
+define void @test59(i16* %ptr, i16 %cmp, i16 %val) {
+; PPC64LE-LABEL: test59:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB59_1:
+; PPC64LE-NEXT: lharx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB59_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB59_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB59_4:
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val seq_cst seq_cst
+ ret void
+}
+
+define void @test60(i32* %ptr, i32 %cmp, i32 %val) {
+; PPC64LE-LABEL: test60:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: b .LBB60_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB60_1:
+; PPC64LE-NEXT: stwcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB60_2
+; PPC64LE-NEXT: .LBB60_2:
+; PPC64LE-NEXT: lwarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: beq 0, .LBB60_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val monotonic monotonic
+ ret void
+}
+
+define void @test61(i32* %ptr, i32 %cmp, i32 %val) {
+; PPC64LE-LABEL: test61:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB61_1:
+; PPC64LE-NEXT: lwarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB61_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB61_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB61_4:
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acquire monotonic
+ ret void
+}
+
+define void @test62(i32* %ptr, i32 %cmp, i32 %val) {
+; PPC64LE-LABEL: test62:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB62_1:
+; PPC64LE-NEXT: lwarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB62_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB62_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB62_4:
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acquire acquire
+ ret void
+}
+
+define void @test63(i32* %ptr, i32 %cmp, i32 %val) {
+; PPC64LE-LABEL: test63:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: b .LBB63_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB63_1:
+; PPC64LE-NEXT: stwcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB63_2
+; PPC64LE-NEXT: .LBB63_2:
+; PPC64LE-NEXT: lwarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: beq 0, .LBB63_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val release monotonic
+ ret void
+}
+
+define void @test64(i32* %ptr, i32 %cmp, i32 %val) {
+; PPC64LE-LABEL: test64:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: b .LBB64_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB64_1:
+; PPC64LE-NEXT: stwcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB64_2
+; PPC64LE-NEXT: .LBB64_2:
+; PPC64LE-NEXT: lwarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: beq 0, .LBB64_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val release acquire
+ ret void
+}
+
+define void @test65(i32* %ptr, i32 %cmp, i32 %val) {
+; PPC64LE-LABEL: test65:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB65_1:
+; PPC64LE-NEXT: lwarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB65_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB65_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB65_4:
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acq_rel monotonic
+ ret void
+}
+
+define void @test66(i32* %ptr, i32 %cmp, i32 %val) {
+; PPC64LE-LABEL: test66:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB66_1:
+; PPC64LE-NEXT: lwarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB66_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB66_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB66_4:
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acq_rel acquire
+ ret void
+}
+
+define void @test67(i32* %ptr, i32 %cmp, i32 %val) {
+; PPC64LE-LABEL: test67:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB67_1:
+; PPC64LE-NEXT: lwarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB67_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB67_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB67_4:
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val seq_cst monotonic
+ ret void
+}
+
+define void @test68(i32* %ptr, i32 %cmp, i32 %val) {
+; PPC64LE-LABEL: test68:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB68_1:
+; PPC64LE-NEXT: lwarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB68_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB68_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB68_4:
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val seq_cst acquire
+ ret void
+}
+
+define void @test69(i32* %ptr, i32 %cmp, i32 %val) {
+; PPC64LE-LABEL: test69:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB69_1:
+; PPC64LE-NEXT: lwarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB69_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB69_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB69_4:
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val seq_cst seq_cst
+ ret void
+}
+
+define void @test70(i64* %ptr, i64 %cmp, i64 %val) {
+; PPC64LE-LABEL: test70:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: b .LBB70_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB70_1:
+; PPC64LE-NEXT: stdcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB70_2
+; PPC64LE-NEXT: .LBB70_2:
+; PPC64LE-NEXT: ldarx 6, 0, 3
+; PPC64LE-NEXT: cmpd 4, 6
+; PPC64LE-NEXT: beq 0, .LBB70_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val monotonic monotonic
+ ret void
+}
+
+define void @test71(i64* %ptr, i64 %cmp, i64 %val) {
+; PPC64LE-LABEL: test71:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB71_1:
+; PPC64LE-NEXT: ldarx 6, 0, 3
+; PPC64LE-NEXT: cmpd 4, 6
+; PPC64LE-NEXT: bne 0, .LBB71_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB71_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB71_4:
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acquire monotonic
+ ret void
+}
+
+define void @test72(i64* %ptr, i64 %cmp, i64 %val) {
+; PPC64LE-LABEL: test72:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB72_1:
+; PPC64LE-NEXT: ldarx 6, 0, 3
+; PPC64LE-NEXT: cmpd 4, 6
+; PPC64LE-NEXT: bne 0, .LBB72_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB72_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB72_4:
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acquire acquire
+ ret void
+}
+
+define void @test73(i64* %ptr, i64 %cmp, i64 %val) {
+; PPC64LE-LABEL: test73:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: b .LBB73_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB73_1:
+; PPC64LE-NEXT: stdcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB73_2
+; PPC64LE-NEXT: .LBB73_2:
+; PPC64LE-NEXT: ldarx 6, 0, 3
+; PPC64LE-NEXT: cmpd 4, 6
+; PPC64LE-NEXT: beq 0, .LBB73_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val release monotonic
+ ret void
+}
+
+define void @test74(i64* %ptr, i64 %cmp, i64 %val) {
+; PPC64LE-LABEL: test74:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: b .LBB74_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB74_1:
+; PPC64LE-NEXT: stdcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB74_2
+; PPC64LE-NEXT: .LBB74_2:
+; PPC64LE-NEXT: ldarx 6, 0, 3
+; PPC64LE-NEXT: cmpd 4, 6
+; PPC64LE-NEXT: beq 0, .LBB74_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val release acquire
+ ret void
+}
+
+define void @test75(i64* %ptr, i64 %cmp, i64 %val) {
+; PPC64LE-LABEL: test75:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB75_1:
+; PPC64LE-NEXT: ldarx 6, 0, 3
+; PPC64LE-NEXT: cmpd 4, 6
+; PPC64LE-NEXT: bne 0, .LBB75_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB75_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB75_4:
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acq_rel monotonic
+ ret void
+}
+
+define void @test76(i64* %ptr, i64 %cmp, i64 %val) {
+; PPC64LE-LABEL: test76:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB76_1:
+; PPC64LE-NEXT: ldarx 6, 0, 3
+; PPC64LE-NEXT: cmpd 4, 6
+; PPC64LE-NEXT: bne 0, .LBB76_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB76_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB76_4:
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acq_rel acquire
+ ret void
+}
+
+define void @test77(i64* %ptr, i64 %cmp, i64 %val) {
+; PPC64LE-LABEL: test77:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB77_1:
+; PPC64LE-NEXT: ldarx 6, 0, 3
+; PPC64LE-NEXT: cmpd 4, 6
+; PPC64LE-NEXT: bne 0, .LBB77_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB77_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB77_4:
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val seq_cst monotonic
+ ret void
+}
+
+define void @test78(i64* %ptr, i64 %cmp, i64 %val) {
+; PPC64LE-LABEL: test78:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB78_1:
+; PPC64LE-NEXT: ldarx 6, 0, 3
+; PPC64LE-NEXT: cmpd 4, 6
+; PPC64LE-NEXT: bne 0, .LBB78_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB78_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB78_4:
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val seq_cst acquire
+ ret void
+}
+
+define void @test79(i64* %ptr, i64 %cmp, i64 %val) {
+; PPC64LE-LABEL: test79:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB79_1:
+; PPC64LE-NEXT: ldarx 6, 0, 3
+; PPC64LE-NEXT: cmpd 4, 6
+; PPC64LE-NEXT: bne 0, .LBB79_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB79_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB79_4:
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val seq_cst seq_cst
+ ret void
+}
+
+define void @test80(i8* %ptr, i8 %cmp, i8 %val) {
+; PPC64LE-LABEL: test80:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: b .LBB80_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB80_1:
+; PPC64LE-NEXT: stbcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB80_2
+; PPC64LE-NEXT: .LBB80_2:
+; PPC64LE-NEXT: lbarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: beq 0, .LBB80_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread monotonic monotonic
+ ret void
+}
+
+define void @test81(i8* %ptr, i8 %cmp, i8 %val) {
+; PPC64LE-LABEL: test81:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB81_1:
+; PPC64LE-NEXT: lbarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB81_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB81_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB81_4:
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread acquire monotonic
+ ret void
+}
+
+define void @test82(i8* %ptr, i8 %cmp, i8 %val) {
+; PPC64LE-LABEL: test82:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB82_1:
+; PPC64LE-NEXT: lbarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB82_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB82_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB82_4:
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread acquire acquire
+ ret void
+}
+
+define void @test83(i8* %ptr, i8 %cmp, i8 %val) {
+; PPC64LE-LABEL: test83:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: b .LBB83_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB83_1:
+; PPC64LE-NEXT: stbcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB83_2
+; PPC64LE-NEXT: .LBB83_2:
+; PPC64LE-NEXT: lbarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: beq 0, .LBB83_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread release monotonic
+ ret void
+}
+
+define void @test84(i8* %ptr, i8 %cmp, i8 %val) {
+; PPC64LE-LABEL: test84:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: b .LBB84_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB84_1:
+; PPC64LE-NEXT: stbcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB84_2
+; PPC64LE-NEXT: .LBB84_2:
+; PPC64LE-NEXT: lbarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: beq 0, .LBB84_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread release acquire
+ ret void
+}
+
+define void @test85(i8* %ptr, i8 %cmp, i8 %val) {
+; PPC64LE-LABEL: test85:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB85_1:
+; PPC64LE-NEXT: lbarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB85_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB85_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB85_4:
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread acq_rel monotonic
+ ret void
+}
+
+define void @test86(i8* %ptr, i8 %cmp, i8 %val) {
+; PPC64LE-LABEL: test86:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB86_1:
+; PPC64LE-NEXT: lbarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB86_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB86_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB86_4:
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread acq_rel acquire
+ ret void
+}
+
+define void @test87(i8* %ptr, i8 %cmp, i8 %val) {
+; PPC64LE-LABEL: test87:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB87_1:
+; PPC64LE-NEXT: lbarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB87_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB87_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB87_4:
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread seq_cst monotonic
+ ret void
+}
+
+define void @test88(i8* %ptr, i8 %cmp, i8 %val) {
+; PPC64LE-LABEL: test88:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB88_1:
+; PPC64LE-NEXT: lbarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB88_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB88_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB88_4:
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread seq_cst acquire
+ ret void
+}
+
+define void @test89(i8* %ptr, i8 %cmp, i8 %val) {
+; PPC64LE-LABEL: test89:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB89_1:
+; PPC64LE-NEXT: lbarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB89_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB89_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB89_4:
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i8* %ptr, i8 %cmp, i8 %val singlethread seq_cst seq_cst
+ ret void
+}
+
+define void @test90(i16* %ptr, i16 %cmp, i16 %val) {
+; PPC64LE-LABEL: test90:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: b .LBB90_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB90_1:
+; PPC64LE-NEXT: sthcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB90_2
+; PPC64LE-NEXT: .LBB90_2:
+; PPC64LE-NEXT: lharx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: beq 0, .LBB90_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread monotonic monotonic
+ ret void
+}
+
+define void @test91(i16* %ptr, i16 %cmp, i16 %val) {
+; PPC64LE-LABEL: test91:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB91_1:
+; PPC64LE-NEXT: lharx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB91_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB91_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB91_4:
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread acquire monotonic
+ ret void
+}
+
+define void @test92(i16* %ptr, i16 %cmp, i16 %val) {
+; PPC64LE-LABEL: test92:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB92_1:
+; PPC64LE-NEXT: lharx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB92_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB92_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB92_4:
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread acquire acquire
+ ret void
+}
+
+define void @test93(i16* %ptr, i16 %cmp, i16 %val) {
+; PPC64LE-LABEL: test93:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: b .LBB93_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB93_1:
+; PPC64LE-NEXT: sthcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB93_2
+; PPC64LE-NEXT: .LBB93_2:
+; PPC64LE-NEXT: lharx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: beq 0, .LBB93_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread release monotonic
+ ret void
+}
+
+define void @test94(i16* %ptr, i16 %cmp, i16 %val) {
+; PPC64LE-LABEL: test94:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: b .LBB94_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB94_1:
+; PPC64LE-NEXT: sthcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB94_2
+; PPC64LE-NEXT: .LBB94_2:
+; PPC64LE-NEXT: lharx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: beq 0, .LBB94_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread release acquire
+ ret void
+}
+
+define void @test95(i16* %ptr, i16 %cmp, i16 %val) {
+; PPC64LE-LABEL: test95:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB95_1:
+; PPC64LE-NEXT: lharx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB95_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB95_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB95_4:
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread acq_rel monotonic
+ ret void
+}
+
+define void @test96(i16* %ptr, i16 %cmp, i16 %val) {
+; PPC64LE-LABEL: test96:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB96_1:
+; PPC64LE-NEXT: lharx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB96_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB96_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB96_4:
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread acq_rel acquire
+ ret void
+}
+
+define void @test97(i16* %ptr, i16 %cmp, i16 %val) {
+; PPC64LE-LABEL: test97:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB97_1:
+; PPC64LE-NEXT: lharx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB97_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB97_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB97_4:
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread seq_cst monotonic
+ ret void
+}
+
+define void @test98(i16* %ptr, i16 %cmp, i16 %val) {
+; PPC64LE-LABEL: test98:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB98_1:
+; PPC64LE-NEXT: lharx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB98_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB98_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB98_4:
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread seq_cst acquire
+ ret void
+}
+
+define void @test99(i16* %ptr, i16 %cmp, i16 %val) {
+; PPC64LE-LABEL: test99:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB99_1:
+; PPC64LE-NEXT: lharx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB99_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB99_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB99_4:
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val singlethread seq_cst seq_cst
+ ret void
+}
+
+define void @test100(i32* %ptr, i32 %cmp, i32 %val) {
+; PPC64LE-LABEL: test100:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: b .LBB100_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB100_1:
+; PPC64LE-NEXT: stwcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB100_2
+; PPC64LE-NEXT: .LBB100_2:
+; PPC64LE-NEXT: lwarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: beq 0, .LBB100_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread monotonic monotonic
+ ret void
+}
+
+define void @test101(i32* %ptr, i32 %cmp, i32 %val) {
+; PPC64LE-LABEL: test101:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB101_1:
+; PPC64LE-NEXT: lwarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB101_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB101_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB101_4:
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread acquire monotonic
+ ret void
+}
+
+define void @test102(i32* %ptr, i32 %cmp, i32 %val) {
+; PPC64LE-LABEL: test102:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB102_1:
+; PPC64LE-NEXT: lwarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB102_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB102_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB102_4:
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread acquire acquire
+ ret void
+}
+
+define void @test103(i32* %ptr, i32 %cmp, i32 %val) {
+; PPC64LE-LABEL: test103:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: b .LBB103_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB103_1:
+; PPC64LE-NEXT: stwcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB103_2
+; PPC64LE-NEXT: .LBB103_2:
+; PPC64LE-NEXT: lwarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: beq 0, .LBB103_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread release monotonic
+ ret void
+}
+
+define void @test104(i32* %ptr, i32 %cmp, i32 %val) {
+; PPC64LE-LABEL: test104:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: b .LBB104_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB104_1:
+; PPC64LE-NEXT: stwcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB104_2
+; PPC64LE-NEXT: .LBB104_2:
+; PPC64LE-NEXT: lwarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: beq 0, .LBB104_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread release acquire
+ ret void
+}
+
+define void @test105(i32* %ptr, i32 %cmp, i32 %val) {
+; PPC64LE-LABEL: test105:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB105_1:
+; PPC64LE-NEXT: lwarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB105_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB105_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB105_4:
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread acq_rel monotonic
+ ret void
+}
+
+define void @test106(i32* %ptr, i32 %cmp, i32 %val) {
+; PPC64LE-LABEL: test106:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB106_1:
+; PPC64LE-NEXT: lwarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB106_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB106_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB106_4:
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread acq_rel acquire
+ ret void
+}
+
+define void @test107(i32* %ptr, i32 %cmp, i32 %val) {
+; PPC64LE-LABEL: test107:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB107_1:
+; PPC64LE-NEXT: lwarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB107_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB107_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB107_4:
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread seq_cst monotonic
+ ret void
+}
+
+define void @test108(i32* %ptr, i32 %cmp, i32 %val) {
+; PPC64LE-LABEL: test108:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB108_1:
+; PPC64LE-NEXT: lwarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB108_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB108_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB108_4:
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread seq_cst acquire
+ ret void
+}
+
+define void @test109(i32* %ptr, i32 %cmp, i32 %val) {
+; PPC64LE-LABEL: test109:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB109_1:
+; PPC64LE-NEXT: lwarx 6, 0, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bne 0, .LBB109_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB109_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB109_4:
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i32* %ptr, i32 %cmp, i32 %val singlethread seq_cst seq_cst
+ ret void
+}
+
+define void @test110(i64* %ptr, i64 %cmp, i64 %val) {
+; PPC64LE-LABEL: test110:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: b .LBB110_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB110_1:
+; PPC64LE-NEXT: stdcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB110_2
+; PPC64LE-NEXT: .LBB110_2:
+; PPC64LE-NEXT: ldarx 6, 0, 3
+; PPC64LE-NEXT: cmpd 4, 6
+; PPC64LE-NEXT: beq 0, .LBB110_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread monotonic monotonic
+ ret void
+}
+
+define void @test111(i64* %ptr, i64 %cmp, i64 %val) {
+; PPC64LE-LABEL: test111:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB111_1:
+; PPC64LE-NEXT: ldarx 6, 0, 3
+; PPC64LE-NEXT: cmpd 4, 6
+; PPC64LE-NEXT: bne 0, .LBB111_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB111_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB111_4:
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread acquire monotonic
+ ret void
+}
+
+define void @test112(i64* %ptr, i64 %cmp, i64 %val) {
+; PPC64LE-LABEL: test112:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB112_1:
+; PPC64LE-NEXT: ldarx 6, 0, 3
+; PPC64LE-NEXT: cmpd 4, 6
+; PPC64LE-NEXT: bne 0, .LBB112_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB112_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB112_4:
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread acquire acquire
+ ret void
+}
+
+define void @test113(i64* %ptr, i64 %cmp, i64 %val) {
+; PPC64LE-LABEL: test113:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: b .LBB113_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB113_1:
+; PPC64LE-NEXT: stdcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB113_2
+; PPC64LE-NEXT: .LBB113_2:
+; PPC64LE-NEXT: ldarx 6, 0, 3
+; PPC64LE-NEXT: cmpd 4, 6
+; PPC64LE-NEXT: beq 0, .LBB113_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread release monotonic
+ ret void
+}
+
+define void @test114(i64* %ptr, i64 %cmp, i64 %val) {
+; PPC64LE-LABEL: test114:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: b .LBB114_2
+; PPC64LE-NEXT: .p2align 5
+; PPC64LE-NEXT: .LBB114_1:
+; PPC64LE-NEXT: stdcx. 5, 0, 3
+; PPC64LE-NEXT: beqlr 0
+; PPC64LE-NEXT: b .LBB114_2
+; PPC64LE-NEXT: .LBB114_2:
+; PPC64LE-NEXT: ldarx 6, 0, 3
+; PPC64LE-NEXT: cmpd 4, 6
+; PPC64LE-NEXT: beq 0, .LBB114_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread release acquire
+ ret void
+}
+
+define void @test115(i64* %ptr, i64 %cmp, i64 %val) {
+; PPC64LE-LABEL: test115:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB115_1:
+; PPC64LE-NEXT: ldarx 6, 0, 3
+; PPC64LE-NEXT: cmpd 4, 6
+; PPC64LE-NEXT: bne 0, .LBB115_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB115_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB115_4:
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread acq_rel monotonic
+ ret void
+}
+
+define void @test116(i64* %ptr, i64 %cmp, i64 %val) {
+; PPC64LE-LABEL: test116:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB116_1:
+; PPC64LE-NEXT: ldarx 6, 0, 3
+; PPC64LE-NEXT: cmpd 4, 6
+; PPC64LE-NEXT: bne 0, .LBB116_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB116_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB116_4:
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread acq_rel acquire
+ ret void
+}
+
+define void @test117(i64* %ptr, i64 %cmp, i64 %val) {
+; PPC64LE-LABEL: test117:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB117_1:
+; PPC64LE-NEXT: ldarx 6, 0, 3
+; PPC64LE-NEXT: cmpd 4, 6
+; PPC64LE-NEXT: bne 0, .LBB117_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB117_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB117_4:
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread seq_cst monotonic
+ ret void
+}
+
+define void @test118(i64* %ptr, i64 %cmp, i64 %val) {
+; PPC64LE-LABEL: test118:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB118_1:
+; PPC64LE-NEXT: ldarx 6, 0, 3
+; PPC64LE-NEXT: cmpd 4, 6
+; PPC64LE-NEXT: bne 0, .LBB118_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB118_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB118_4:
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread seq_cst acquire
+ ret void
+}
+
+define void @test119(i64* %ptr, i64 %cmp, i64 %val) {
+; PPC64LE-LABEL: test119:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB119_1:
+; PPC64LE-NEXT: ldarx 6, 0, 3
+; PPC64LE-NEXT: cmpd 4, 6
+; PPC64LE-NEXT: bne 0, .LBB119_4
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 5, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB119_1
+; PPC64LE-NEXT: # BB#3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+; PPC64LE-NEXT: .LBB119_4:
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %res = cmpxchg i64* %ptr, i64 %cmp, i64 %val singlethread seq_cst seq_cst
+ ret void
+}
+
+define i8 @test120(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test120:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB120_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB120_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i8* %ptr, i8 %val monotonic
+ ret i8 %ret
+}
+
+define i8 @test121(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test121:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB121_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: stbcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB121_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i8* %ptr, i8 %val acquire
+ ret i8 %ret
+}
+
+define i8 @test122(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test122:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB122_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB122_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i8* %ptr, i8 %val release
+ ret i8 %ret
+}
+
+define i8 @test123(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test123:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB123_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB123_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i8* %ptr, i8 %val acq_rel
+ ret i8 %ret
+}
+
+define i8 @test124(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test124:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB124_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB124_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i8* %ptr, i8 %val seq_cst
+ ret i8 %ret
+}
+
+define i16 @test125(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test125:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB125_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB125_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i16* %ptr, i16 %val monotonic
+ ret i16 %ret
+}
+
+define i16 @test126(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test126:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB126_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: sthcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB126_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i16* %ptr, i16 %val acquire
+ ret i16 %ret
+}
+
+define i16 @test127(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test127:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB127_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB127_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i16* %ptr, i16 %val release
+ ret i16 %ret
+}
+
+define i16 @test128(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test128:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB128_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB128_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i16* %ptr, i16 %val acq_rel
+ ret i16 %ret
+}
+
+define i16 @test129(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test129:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB129_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB129_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i16* %ptr, i16 %val seq_cst
+ ret i16 %ret
+}
+
+define i32 @test130(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test130:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB130_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB130_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i32* %ptr, i32 %val monotonic
+ ret i32 %ret
+}
+
+define i32 @test131(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test131:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB131_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: stwcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB131_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i32* %ptr, i32 %val acquire
+ ret i32 %ret
+}
+
+define i32 @test132(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test132:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB132_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB132_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i32* %ptr, i32 %val release
+ ret i32 %ret
+}
+
+define i32 @test133(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test133:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB133_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB133_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i32* %ptr, i32 %val acq_rel
+ ret i32 %ret
+}
+
+define i32 @test134(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test134:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB134_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB134_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i32* %ptr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+define i64 @test135(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test135:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB135_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB135_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i64* %ptr, i64 %val monotonic
+ ret i64 %ret
+}
+
+define i64 @test136(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test136:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB136_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: stdcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB136_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i64* %ptr, i64 %val acquire
+ ret i64 %ret
+}
+
+define i64 @test137(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test137:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB137_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB137_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i64* %ptr, i64 %val release
+ ret i64 %ret
+}
+
+define i64 @test138(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test138:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB138_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB138_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i64* %ptr, i64 %val acq_rel
+ ret i64 %ret
+}
+
+define i64 @test139(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test139:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB139_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB139_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i64* %ptr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+define i8 @test140(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test140:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB140_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB140_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i8* %ptr, i8 %val monotonic
+ ret i8 %ret
+}
+
+define i8 @test141(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test141:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB141_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: add 6, 4, 3
+; PPC64LE-NEXT: stbcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB141_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i8* %ptr, i8 %val acquire
+ ret i8 %ret
+}
+
+define i8 @test142(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test142:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB142_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB142_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i8* %ptr, i8 %val release
+ ret i8 %ret
+}
+
+define i8 @test143(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test143:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB143_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB143_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i8* %ptr, i8 %val acq_rel
+ ret i8 %ret
+}
+
+define i8 @test144(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test144:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB144_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB144_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i8* %ptr, i8 %val seq_cst
+ ret i8 %ret
+}
+
+define i16 @test145(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test145:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB145_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB145_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i16* %ptr, i16 %val monotonic
+ ret i16 %ret
+}
+
+define i16 @test146(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test146:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB146_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: add 6, 4, 3
+; PPC64LE-NEXT: sthcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB146_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i16* %ptr, i16 %val acquire
+ ret i16 %ret
+}
+
+define i16 @test147(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test147:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB147_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB147_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i16* %ptr, i16 %val release
+ ret i16 %ret
+}
+
+define i16 @test148(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test148:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB148_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB148_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i16* %ptr, i16 %val acq_rel
+ ret i16 %ret
+}
+
+define i16 @test149(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test149:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB149_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB149_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i16* %ptr, i16 %val seq_cst
+ ret i16 %ret
+}
+
+define i32 @test150(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test150:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB150_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB150_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i32* %ptr, i32 %val monotonic
+ ret i32 %ret
+}
+
+define i32 @test151(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test151:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB151_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: add 6, 4, 3
+; PPC64LE-NEXT: stwcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB151_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i32* %ptr, i32 %val acquire
+ ret i32 %ret
+}
+
+define i32 @test152(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test152:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB152_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB152_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i32* %ptr, i32 %val release
+ ret i32 %ret
+}
+
+define i32 @test153(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test153:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB153_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB153_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i32* %ptr, i32 %val acq_rel
+ ret i32 %ret
+}
+
+define i32 @test154(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test154:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB154_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB154_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i32* %ptr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+define i64 @test155(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test155:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB155_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB155_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i64* %ptr, i64 %val monotonic
+ ret i64 %ret
+}
+
+define i64 @test156(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test156:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB156_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: add 6, 4, 3
+; PPC64LE-NEXT: stdcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB156_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i64* %ptr, i64 %val acquire
+ ret i64 %ret
+}
+
+define i64 @test157(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test157:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB157_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB157_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i64* %ptr, i64 %val release
+ ret i64 %ret
+}
+
+define i64 @test158(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test158:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB158_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB158_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i64* %ptr, i64 %val acq_rel
+ ret i64 %ret
+}
+
+define i64 @test159(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test159:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB159_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB159_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i64* %ptr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+define i8 @test160(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test160:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB160_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB160_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i8* %ptr, i8 %val monotonic
+ ret i8 %ret
+}
+
+define i8 @test161(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test161:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB161_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: subf 6, 4, 3
+; PPC64LE-NEXT: stbcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB161_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i8* %ptr, i8 %val acquire
+ ret i8 %ret
+}
+
+define i8 @test162(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test162:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB162_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB162_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i8* %ptr, i8 %val release
+ ret i8 %ret
+}
+
+define i8 @test163(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test163:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB163_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB163_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i8* %ptr, i8 %val acq_rel
+ ret i8 %ret
+}
+
+define i8 @test164(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test164:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB164_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB164_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i8* %ptr, i8 %val seq_cst
+ ret i8 %ret
+}
+
+define i16 @test165(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test165:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB165_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB165_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i16* %ptr, i16 %val monotonic
+ ret i16 %ret
+}
+
+define i16 @test166(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test166:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB166_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: subf 6, 4, 3
+; PPC64LE-NEXT: sthcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB166_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i16* %ptr, i16 %val acquire
+ ret i16 %ret
+}
+
+define i16 @test167(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test167:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB167_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB167_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i16* %ptr, i16 %val release
+ ret i16 %ret
+}
+
+define i16 @test168(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test168:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB168_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB168_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i16* %ptr, i16 %val acq_rel
+ ret i16 %ret
+}
+
+define i16 @test169(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test169:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB169_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB169_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i16* %ptr, i16 %val seq_cst
+ ret i16 %ret
+}
+
+define i32 @test170(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test170:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB170_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB170_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i32* %ptr, i32 %val monotonic
+ ret i32 %ret
+}
+
+define i32 @test171(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test171:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB171_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: subf 6, 4, 3
+; PPC64LE-NEXT: stwcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB171_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i32* %ptr, i32 %val acquire
+ ret i32 %ret
+}
+
+define i32 @test172(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test172:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB172_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB172_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i32* %ptr, i32 %val release
+ ret i32 %ret
+}
+
+define i32 @test173(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test173:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB173_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB173_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i32* %ptr, i32 %val acq_rel
+ ret i32 %ret
+}
+
+define i32 @test174(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test174:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB174_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB174_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i32* %ptr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+define i64 @test175(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test175:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB175_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: sub 6, 5, 4
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB175_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i64* %ptr, i64 %val monotonic
+ ret i64 %ret
+}
+
+define i64 @test176(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test176:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB176_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: sub 6, 3, 4
+; PPC64LE-NEXT: stdcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB176_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i64* %ptr, i64 %val acquire
+ ret i64 %ret
+}
+
+define i64 @test177(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test177:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB177_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: sub 6, 5, 4
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB177_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i64* %ptr, i64 %val release
+ ret i64 %ret
+}
+
+define i64 @test178(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test178:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB178_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: sub 6, 5, 4
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB178_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i64* %ptr, i64 %val acq_rel
+ ret i64 %ret
+}
+
+define i64 @test179(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test179:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB179_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: sub 6, 5, 4
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB179_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i64* %ptr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+define i8 @test180(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test180:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB180_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB180_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i8* %ptr, i8 %val monotonic
+ ret i8 %ret
+}
+
+define i8 @test181(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test181:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB181_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: and 6, 4, 3
+; PPC64LE-NEXT: stbcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB181_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i8* %ptr, i8 %val acquire
+ ret i8 %ret
+}
+
+define i8 @test182(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test182:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB182_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB182_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i8* %ptr, i8 %val release
+ ret i8 %ret
+}
+
+define i8 @test183(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test183:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB183_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB183_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i8* %ptr, i8 %val acq_rel
+ ret i8 %ret
+}
+
+define i8 @test184(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test184:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB184_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB184_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i8* %ptr, i8 %val seq_cst
+ ret i8 %ret
+}
+
+define i16 @test185(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test185:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB185_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB185_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i16* %ptr, i16 %val monotonic
+ ret i16 %ret
+}
+
+define i16 @test186(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test186:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB186_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: and 6, 4, 3
+; PPC64LE-NEXT: sthcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB186_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i16* %ptr, i16 %val acquire
+ ret i16 %ret
+}
+
+define i16 @test187(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test187:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB187_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB187_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i16* %ptr, i16 %val release
+ ret i16 %ret
+}
+
+define i16 @test188(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test188:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB188_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB188_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i16* %ptr, i16 %val acq_rel
+ ret i16 %ret
+}
+
+define i16 @test189(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test189:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB189_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB189_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i16* %ptr, i16 %val seq_cst
+ ret i16 %ret
+}
+
+define i32 @test190(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test190:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB190_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB190_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i32* %ptr, i32 %val monotonic
+ ret i32 %ret
+}
+
+define i32 @test191(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test191:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB191_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: and 6, 4, 3
+; PPC64LE-NEXT: stwcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB191_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i32* %ptr, i32 %val acquire
+ ret i32 %ret
+}
+
+define i32 @test192(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test192:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB192_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB192_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i32* %ptr, i32 %val release
+ ret i32 %ret
+}
+
+define i32 @test193(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test193:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB193_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB193_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i32* %ptr, i32 %val acq_rel
+ ret i32 %ret
+}
+
+define i32 @test194(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test194:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB194_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB194_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i32* %ptr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+define i64 @test195(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test195:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB195_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB195_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i64* %ptr, i64 %val monotonic
+ ret i64 %ret
+}
+
+define i64 @test196(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test196:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB196_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: and 6, 4, 3
+; PPC64LE-NEXT: stdcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB196_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i64* %ptr, i64 %val acquire
+ ret i64 %ret
+}
+
+define i64 @test197(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test197:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB197_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB197_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i64* %ptr, i64 %val release
+ ret i64 %ret
+}
+
+define i64 @test198(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test198:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB198_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB198_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i64* %ptr, i64 %val acq_rel
+ ret i64 %ret
+}
+
+define i64 @test199(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test199:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB199_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB199_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i64* %ptr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+define i8 @test200(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test200:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB200_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB200_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i8* %ptr, i8 %val monotonic
+ ret i8 %ret
+}
+
+define i8 @test201(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test201:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB201_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: nand 6, 4, 3
+; PPC64LE-NEXT: stbcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB201_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i8* %ptr, i8 %val acquire
+ ret i8 %ret
+}
+
+define i8 @test202(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test202:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB202_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB202_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i8* %ptr, i8 %val release
+ ret i8 %ret
+}
+
+define i8 @test203(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test203:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB203_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB203_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i8* %ptr, i8 %val acq_rel
+ ret i8 %ret
+}
+
+define i8 @test204(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test204:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB204_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB204_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i8* %ptr, i8 %val seq_cst
+ ret i8 %ret
+}
+
+define i16 @test205(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test205:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB205_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB205_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i16* %ptr, i16 %val monotonic
+ ret i16 %ret
+}
+
+define i16 @test206(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test206:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB206_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: nand 6, 4, 3
+; PPC64LE-NEXT: sthcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB206_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i16* %ptr, i16 %val acquire
+ ret i16 %ret
+}
+
+define i16 @test207(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test207:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB207_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB207_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i16* %ptr, i16 %val release
+ ret i16 %ret
+}
+
+define i16 @test208(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test208:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB208_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB208_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i16* %ptr, i16 %val acq_rel
+ ret i16 %ret
+}
+
+define i16 @test209(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test209:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB209_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB209_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i16* %ptr, i16 %val seq_cst
+ ret i16 %ret
+}
+
+define i32 @test210(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test210:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB210_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB210_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i32* %ptr, i32 %val monotonic
+ ret i32 %ret
+}
+
+define i32 @test211(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test211:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB211_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: nand 6, 4, 3
+; PPC64LE-NEXT: stwcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB211_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i32* %ptr, i32 %val acquire
+ ret i32 %ret
+}
+
+define i32 @test212(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test212:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB212_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB212_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i32* %ptr, i32 %val release
+ ret i32 %ret
+}
+
+define i32 @test213(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test213:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB213_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB213_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i32* %ptr, i32 %val acq_rel
+ ret i32 %ret
+}
+
+define i32 @test214(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test214:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB214_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB214_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i32* %ptr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+define i64 @test215(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test215:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB215_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB215_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i64* %ptr, i64 %val monotonic
+ ret i64 %ret
+}
+
+define i64 @test216(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test216:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB216_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: nand 6, 4, 3
+; PPC64LE-NEXT: stdcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB216_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i64* %ptr, i64 %val acquire
+ ret i64 %ret
+}
+
+define i64 @test217(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test217:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB217_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB217_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i64* %ptr, i64 %val release
+ ret i64 %ret
+}
+
+define i64 @test218(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test218:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB218_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB218_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i64* %ptr, i64 %val acq_rel
+ ret i64 %ret
+}
+
+define i64 @test219(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test219:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB219_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB219_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i64* %ptr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+define i8 @test220(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test220:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB220_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB220_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i8* %ptr, i8 %val monotonic
+ ret i8 %ret
+}
+
+define i8 @test221(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test221:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB221_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: or 6, 4, 3
+; PPC64LE-NEXT: stbcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB221_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i8* %ptr, i8 %val acquire
+ ret i8 %ret
+}
+
+define i8 @test222(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test222:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB222_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB222_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i8* %ptr, i8 %val release
+ ret i8 %ret
+}
+
+define i8 @test223(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test223:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB223_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB223_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i8* %ptr, i8 %val acq_rel
+ ret i8 %ret
+}
+
+define i8 @test224(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test224:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB224_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB224_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i8* %ptr, i8 %val seq_cst
+ ret i8 %ret
+}
+
+define i16 @test225(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test225:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB225_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB225_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i16* %ptr, i16 %val monotonic
+ ret i16 %ret
+}
+
+define i16 @test226(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test226:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB226_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: or 6, 4, 3
+; PPC64LE-NEXT: sthcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB226_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i16* %ptr, i16 %val acquire
+ ret i16 %ret
+}
+
+define i16 @test227(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test227:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB227_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB227_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i16* %ptr, i16 %val release
+ ret i16 %ret
+}
+
+define i16 @test228(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test228:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB228_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB228_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i16* %ptr, i16 %val acq_rel
+ ret i16 %ret
+}
+
+define i16 @test229(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test229:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB229_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB229_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i16* %ptr, i16 %val seq_cst
+ ret i16 %ret
+}
+
+define i32 @test230(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test230:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB230_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB230_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i32* %ptr, i32 %val monotonic
+ ret i32 %ret
+}
+
+define i32 @test231(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test231:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB231_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: or 6, 4, 3
+; PPC64LE-NEXT: stwcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB231_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i32* %ptr, i32 %val acquire
+ ret i32 %ret
+}
+
+define i32 @test232(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test232:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB232_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB232_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i32* %ptr, i32 %val release
+ ret i32 %ret
+}
+
+define i32 @test233(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test233:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB233_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB233_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i32* %ptr, i32 %val acq_rel
+ ret i32 %ret
+}
+
+define i32 @test234(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test234:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB234_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB234_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i32* %ptr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+define i64 @test235(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test235:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB235_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB235_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i64* %ptr, i64 %val monotonic
+ ret i64 %ret
+}
+
+define i64 @test236(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test236:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB236_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: or 6, 4, 3
+; PPC64LE-NEXT: stdcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB236_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i64* %ptr, i64 %val acquire
+ ret i64 %ret
+}
+
+define i64 @test237(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test237:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB237_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB237_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i64* %ptr, i64 %val release
+ ret i64 %ret
+}
+
+define i64 @test238(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test238:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB238_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB238_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i64* %ptr, i64 %val acq_rel
+ ret i64 %ret
+}
+
+define i64 @test239(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test239:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB239_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB239_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i64* %ptr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+define i8 @test240(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test240:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB240_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB240_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i8* %ptr, i8 %val monotonic
+ ret i8 %ret
+}
+
+define i8 @test241(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test241:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB241_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: xor 6, 4, 3
+; PPC64LE-NEXT: stbcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB241_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i8* %ptr, i8 %val acquire
+ ret i8 %ret
+}
+
+define i8 @test242(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test242:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB242_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB242_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i8* %ptr, i8 %val release
+ ret i8 %ret
+}
+
+define i8 @test243(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test243:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB243_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB243_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i8* %ptr, i8 %val acq_rel
+ ret i8 %ret
+}
+
+define i8 @test244(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test244:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB244_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB244_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i8* %ptr, i8 %val seq_cst
+ ret i8 %ret
+}
+
+define i16 @test245(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test245:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB245_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB245_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i16* %ptr, i16 %val monotonic
+ ret i16 %ret
+}
+
+define i16 @test246(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test246:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB246_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: xor 6, 4, 3
+; PPC64LE-NEXT: sthcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB246_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i16* %ptr, i16 %val acquire
+ ret i16 %ret
+}
+
+define i16 @test247(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test247:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB247_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB247_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i16* %ptr, i16 %val release
+ ret i16 %ret
+}
+
+define i16 @test248(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test248:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB248_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB248_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i16* %ptr, i16 %val acq_rel
+ ret i16 %ret
+}
+
+define i16 @test249(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test249:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB249_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB249_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i16* %ptr, i16 %val seq_cst
+ ret i16 %ret
+}
+
+define i32 @test250(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test250:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB250_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB250_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i32* %ptr, i32 %val monotonic
+ ret i32 %ret
+}
+
+define i32 @test251(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test251:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB251_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: xor 6, 4, 3
+; PPC64LE-NEXT: stwcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB251_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i32* %ptr, i32 %val acquire
+ ret i32 %ret
+}
+
+define i32 @test252(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test252:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB252_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB252_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i32* %ptr, i32 %val release
+ ret i32 %ret
+}
+
+define i32 @test253(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test253:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB253_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB253_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i32* %ptr, i32 %val acq_rel
+ ret i32 %ret
+}
+
+define i32 @test254(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test254:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB254_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB254_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i32* %ptr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+define i64 @test255(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test255:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB255_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB255_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i64* %ptr, i64 %val monotonic
+ ret i64 %ret
+}
+
+define i64 @test256(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test256:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB256_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: xor 6, 4, 3
+; PPC64LE-NEXT: stdcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB256_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i64* %ptr, i64 %val acquire
+ ret i64 %ret
+}
+
+define i64 @test257(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test257:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB257_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB257_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i64* %ptr, i64 %val release
+ ret i64 %ret
+}
+
+define i64 @test258(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test258:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB258_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB258_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i64* %ptr, i64 %val acq_rel
+ ret i64 %ret
+}
+
+define i64 @test259(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test259:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB259_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB259_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i64* %ptr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+define i8 @test260(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test260:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB260_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: extsb 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB260_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB260_1
+; PPC64LE-NEXT: .LBB260_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i8* %ptr, i8 %val monotonic
+ ret i8 %ret
+}
+
+define i8 @test261(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test261:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB261_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: extsb 6, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB261_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB261_1
+; PPC64LE-NEXT: .LBB261_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i8* %ptr, i8 %val acquire
+ ret i8 %ret
+}
+
+define i8 @test262(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test262:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB262_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: extsb 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB262_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB262_1
+; PPC64LE-NEXT: .LBB262_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i8* %ptr, i8 %val release
+ ret i8 %ret
+}
+
+define i8 @test263(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test263:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB263_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: extsb 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB263_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB263_1
+; PPC64LE-NEXT: .LBB263_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i8* %ptr, i8 %val acq_rel
+ ret i8 %ret
+}
+
+define i8 @test264(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test264:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB264_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: extsb 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB264_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB264_1
+; PPC64LE-NEXT: .LBB264_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i8* %ptr, i8 %val seq_cst
+ ret i8 %ret
+}
+
+define i16 @test265(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test265:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB265_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: extsh 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB265_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB265_1
+; PPC64LE-NEXT: .LBB265_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i16* %ptr, i16 %val monotonic
+ ret i16 %ret
+}
+
+define i16 @test266(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test266:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB266_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: extsh 6, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB266_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB266_1
+; PPC64LE-NEXT: .LBB266_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i16* %ptr, i16 %val acquire
+ ret i16 %ret
+}
+
+define i16 @test267(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test267:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB267_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: extsh 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB267_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB267_1
+; PPC64LE-NEXT: .LBB267_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i16* %ptr, i16 %val release
+ ret i16 %ret
+}
+
+define i16 @test268(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test268:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB268_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: extsh 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB268_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB268_1
+; PPC64LE-NEXT: .LBB268_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i16* %ptr, i16 %val acq_rel
+ ret i16 %ret
+}
+
+define i16 @test269(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test269:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB269_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: extsh 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB269_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB269_1
+; PPC64LE-NEXT: .LBB269_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i16* %ptr, i16 %val seq_cst
+ ret i16 %ret
+}
+
+define i32 @test270(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test270:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB270_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmpw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB270_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB270_1
+; PPC64LE-NEXT: .LBB270_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i32* %ptr, i32 %val monotonic
+ ret i32 %ret
+}
+
+define i32 @test271(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test271:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB271_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: cmpw 4, 3
+; PPC64LE-NEXT: ble 0, .LBB271_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB271_1
+; PPC64LE-NEXT: .LBB271_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i32* %ptr, i32 %val acquire
+ ret i32 %ret
+}
+
+define i32 @test272(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test272:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB272_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmpw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB272_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB272_1
+; PPC64LE-NEXT: .LBB272_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i32* %ptr, i32 %val release
+ ret i32 %ret
+}
+
+define i32 @test273(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test273:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB273_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmpw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB273_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB273_1
+; PPC64LE-NEXT: .LBB273_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i32* %ptr, i32 %val acq_rel
+ ret i32 %ret
+}
+
+define i32 @test274(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test274:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB274_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmpw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB274_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB274_1
+; PPC64LE-NEXT: .LBB274_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i32* %ptr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+define i64 @test275(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test275:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB275_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpd 4, 5
+; PPC64LE-NEXT: ble 0, .LBB275_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB275_1
+; PPC64LE-NEXT: .LBB275_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i64* %ptr, i64 %val monotonic
+ ret i64 %ret
+}
+
+define i64 @test276(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test276:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB276_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: cmpd 4, 3
+; PPC64LE-NEXT: ble 0, .LBB276_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB276_1
+; PPC64LE-NEXT: .LBB276_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i64* %ptr, i64 %val acquire
+ ret i64 %ret
+}
+
+define i64 @test277(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test277:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB277_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpd 4, 5
+; PPC64LE-NEXT: ble 0, .LBB277_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB277_1
+; PPC64LE-NEXT: .LBB277_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i64* %ptr, i64 %val release
+ ret i64 %ret
+}
+
+define i64 @test278(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test278:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB278_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpd 4, 5
+; PPC64LE-NEXT: ble 0, .LBB278_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB278_1
+; PPC64LE-NEXT: .LBB278_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i64* %ptr, i64 %val acq_rel
+ ret i64 %ret
+}
+
+define i64 @test279(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test279:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB279_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpd 4, 5
+; PPC64LE-NEXT: ble 0, .LBB279_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB279_1
+; PPC64LE-NEXT: .LBB279_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i64* %ptr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+define i8 @test280(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test280:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB280_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: extsb 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB280_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB280_1
+; PPC64LE-NEXT: .LBB280_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i8* %ptr, i8 %val monotonic
+ ret i8 %ret
+}
+
+define i8 @test281(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test281:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB281_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: extsb 6, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB281_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB281_1
+; PPC64LE-NEXT: .LBB281_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i8* %ptr, i8 %val acquire
+ ret i8 %ret
+}
+
+define i8 @test282(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test282:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB282_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: extsb 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB282_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB282_1
+; PPC64LE-NEXT: .LBB282_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i8* %ptr, i8 %val release
+ ret i8 %ret
+}
+
+define i8 @test283(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test283:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB283_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: extsb 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB283_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB283_1
+; PPC64LE-NEXT: .LBB283_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i8* %ptr, i8 %val acq_rel
+ ret i8 %ret
+}
+
+define i8 @test284(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test284:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB284_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: extsb 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB284_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB284_1
+; PPC64LE-NEXT: .LBB284_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i8* %ptr, i8 %val seq_cst
+ ret i8 %ret
+}
+
+define i16 @test285(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test285:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB285_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: extsh 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB285_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB285_1
+; PPC64LE-NEXT: .LBB285_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i16* %ptr, i16 %val monotonic
+ ret i16 %ret
+}
+
+define i16 @test286(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test286:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB286_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: extsh 6, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB286_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB286_1
+; PPC64LE-NEXT: .LBB286_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i16* %ptr, i16 %val acquire
+ ret i16 %ret
+}
+
+define i16 @test287(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test287:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB287_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: extsh 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB287_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB287_1
+; PPC64LE-NEXT: .LBB287_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i16* %ptr, i16 %val release
+ ret i16 %ret
+}
+
+define i16 @test288(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test288:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB288_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: extsh 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB288_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB288_1
+; PPC64LE-NEXT: .LBB288_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i16* %ptr, i16 %val acq_rel
+ ret i16 %ret
+}
+
+define i16 @test289(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test289:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB289_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: extsh 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB289_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB289_1
+; PPC64LE-NEXT: .LBB289_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i16* %ptr, i16 %val seq_cst
+ ret i16 %ret
+}
+
+define i32 @test290(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test290:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB290_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmpw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB290_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB290_1
+; PPC64LE-NEXT: .LBB290_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i32* %ptr, i32 %val monotonic
+ ret i32 %ret
+}
+
+define i32 @test291(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test291:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB291_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: cmpw 4, 3
+; PPC64LE-NEXT: bge 0, .LBB291_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB291_1
+; PPC64LE-NEXT: .LBB291_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i32* %ptr, i32 %val acquire
+ ret i32 %ret
+}
+
+define i32 @test292(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test292:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB292_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmpw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB292_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB292_1
+; PPC64LE-NEXT: .LBB292_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i32* %ptr, i32 %val release
+ ret i32 %ret
+}
+
+define i32 @test293(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test293:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB293_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmpw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB293_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB293_1
+; PPC64LE-NEXT: .LBB293_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i32* %ptr, i32 %val acq_rel
+ ret i32 %ret
+}
+
+define i32 @test294(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test294:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB294_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmpw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB294_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB294_1
+; PPC64LE-NEXT: .LBB294_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i32* %ptr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+define i64 @test295(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test295:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB295_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpd 4, 5
+; PPC64LE-NEXT: bge 0, .LBB295_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB295_1
+; PPC64LE-NEXT: .LBB295_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i64* %ptr, i64 %val monotonic
+ ret i64 %ret
+}
+
+define i64 @test296(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test296:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB296_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: cmpd 4, 3
+; PPC64LE-NEXT: bge 0, .LBB296_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB296_1
+; PPC64LE-NEXT: .LBB296_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i64* %ptr, i64 %val acquire
+ ret i64 %ret
+}
+
+define i64 @test297(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test297:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB297_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpd 4, 5
+; PPC64LE-NEXT: bge 0, .LBB297_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB297_1
+; PPC64LE-NEXT: .LBB297_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i64* %ptr, i64 %val release
+ ret i64 %ret
+}
+
+define i64 @test298(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test298:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB298_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpd 4, 5
+; PPC64LE-NEXT: bge 0, .LBB298_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB298_1
+; PPC64LE-NEXT: .LBB298_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i64* %ptr, i64 %val acq_rel
+ ret i64 %ret
+}
+
+define i64 @test299(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test299:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB299_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpd 4, 5
+; PPC64LE-NEXT: bge 0, .LBB299_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB299_1
+; PPC64LE-NEXT: .LBB299_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i64* %ptr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+define i8 @test300(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test300:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB300_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB300_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB300_1
+; PPC64LE-NEXT: .LBB300_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i8* %ptr, i8 %val monotonic
+ ret i8 %ret
+}
+
+define i8 @test301(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test301:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB301_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: cmplw 4, 3
+; PPC64LE-NEXT: ble 0, .LBB301_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB301_1
+; PPC64LE-NEXT: .LBB301_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i8* %ptr, i8 %val acquire
+ ret i8 %ret
+}
+
+define i8 @test302(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test302:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB302_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB302_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB302_1
+; PPC64LE-NEXT: .LBB302_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i8* %ptr, i8 %val release
+ ret i8 %ret
+}
+
+define i8 @test303(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test303:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB303_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB303_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB303_1
+; PPC64LE-NEXT: .LBB303_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i8* %ptr, i8 %val acq_rel
+ ret i8 %ret
+}
+
+define i8 @test304(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test304:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB304_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB304_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB304_1
+; PPC64LE-NEXT: .LBB304_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i8* %ptr, i8 %val seq_cst
+ ret i8 %ret
+}
+
+define i16 @test305(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test305:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB305_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB305_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB305_1
+; PPC64LE-NEXT: .LBB305_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i16* %ptr, i16 %val monotonic
+ ret i16 %ret
+}
+
+define i16 @test306(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test306:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB306_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: cmplw 4, 3
+; PPC64LE-NEXT: ble 0, .LBB306_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB306_1
+; PPC64LE-NEXT: .LBB306_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i16* %ptr, i16 %val acquire
+ ret i16 %ret
+}
+
+define i16 @test307(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test307:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB307_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB307_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB307_1
+; PPC64LE-NEXT: .LBB307_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i16* %ptr, i16 %val release
+ ret i16 %ret
+}
+
+define i16 @test308(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test308:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB308_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB308_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB308_1
+; PPC64LE-NEXT: .LBB308_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i16* %ptr, i16 %val acq_rel
+ ret i16 %ret
+}
+
+define i16 @test309(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test309:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB309_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB309_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB309_1
+; PPC64LE-NEXT: .LBB309_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i16* %ptr, i16 %val seq_cst
+ ret i16 %ret
+}
+
+define i32 @test310(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test310:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB310_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB310_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB310_1
+; PPC64LE-NEXT: .LBB310_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i32* %ptr, i32 %val monotonic
+ ret i32 %ret
+}
+
+define i32 @test311(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test311:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB311_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: cmplw 4, 3
+; PPC64LE-NEXT: ble 0, .LBB311_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB311_1
+; PPC64LE-NEXT: .LBB311_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i32* %ptr, i32 %val acquire
+ ret i32 %ret
+}
+
+define i32 @test312(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test312:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB312_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB312_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB312_1
+; PPC64LE-NEXT: .LBB312_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i32* %ptr, i32 %val release
+ ret i32 %ret
+}
+
+define i32 @test313(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test313:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB313_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB313_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB313_1
+; PPC64LE-NEXT: .LBB313_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i32* %ptr, i32 %val acq_rel
+ ret i32 %ret
+}
+
+define i32 @test314(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test314:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB314_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB314_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB314_1
+; PPC64LE-NEXT: .LBB314_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i32* %ptr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+define i64 @test315(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test315:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB315_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpld 4, 5
+; PPC64LE-NEXT: ble 0, .LBB315_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB315_1
+; PPC64LE-NEXT: .LBB315_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i64* %ptr, i64 %val monotonic
+ ret i64 %ret
+}
+
+define i64 @test316(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test316:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB316_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: cmpld 4, 3
+; PPC64LE-NEXT: ble 0, .LBB316_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB316_1
+; PPC64LE-NEXT: .LBB316_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i64* %ptr, i64 %val acquire
+ ret i64 %ret
+}
+
+define i64 @test317(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test317:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB317_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpld 4, 5
+; PPC64LE-NEXT: ble 0, .LBB317_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB317_1
+; PPC64LE-NEXT: .LBB317_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i64* %ptr, i64 %val release
+ ret i64 %ret
+}
+
+define i64 @test318(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test318:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB318_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpld 4, 5
+; PPC64LE-NEXT: ble 0, .LBB318_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB318_1
+; PPC64LE-NEXT: .LBB318_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i64* %ptr, i64 %val acq_rel
+ ret i64 %ret
+}
+
+define i64 @test319(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test319:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB319_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpld 4, 5
+; PPC64LE-NEXT: ble 0, .LBB319_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB319_1
+; PPC64LE-NEXT: .LBB319_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i64* %ptr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+define i8 @test320(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test320:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB320_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB320_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB320_1
+; PPC64LE-NEXT: .LBB320_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i8* %ptr, i8 %val monotonic
+ ret i8 %ret
+}
+
+define i8 @test321(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test321:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB321_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: cmplw 4, 3
+; PPC64LE-NEXT: bge 0, .LBB321_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB321_1
+; PPC64LE-NEXT: .LBB321_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i8* %ptr, i8 %val acquire
+ ret i8 %ret
+}
+
+define i8 @test322(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test322:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB322_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB322_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB322_1
+; PPC64LE-NEXT: .LBB322_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i8* %ptr, i8 %val release
+ ret i8 %ret
+}
+
+define i8 @test323(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test323:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB323_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB323_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB323_1
+; PPC64LE-NEXT: .LBB323_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i8* %ptr, i8 %val acq_rel
+ ret i8 %ret
+}
+
+define i8 @test324(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test324:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB324_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB324_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB324_1
+; PPC64LE-NEXT: .LBB324_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i8* %ptr, i8 %val seq_cst
+ ret i8 %ret
+}
+
+define i16 @test325(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test325:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB325_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB325_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB325_1
+; PPC64LE-NEXT: .LBB325_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i16* %ptr, i16 %val monotonic
+ ret i16 %ret
+}
+
+define i16 @test326(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test326:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB326_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: cmplw 4, 3
+; PPC64LE-NEXT: bge 0, .LBB326_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB326_1
+; PPC64LE-NEXT: .LBB326_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i16* %ptr, i16 %val acquire
+ ret i16 %ret
+}
+
+define i16 @test327(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test327:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB327_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB327_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB327_1
+; PPC64LE-NEXT: .LBB327_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i16* %ptr, i16 %val release
+ ret i16 %ret
+}
+
+define i16 @test328(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test328:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB328_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB328_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB328_1
+; PPC64LE-NEXT: .LBB328_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i16* %ptr, i16 %val acq_rel
+ ret i16 %ret
+}
+
+define i16 @test329(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test329:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB329_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB329_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB329_1
+; PPC64LE-NEXT: .LBB329_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i16* %ptr, i16 %val seq_cst
+ ret i16 %ret
+}
+
+define i32 @test330(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test330:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB330_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB330_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB330_1
+; PPC64LE-NEXT: .LBB330_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i32* %ptr, i32 %val monotonic
+ ret i32 %ret
+}
+
+define i32 @test331(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test331:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB331_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: cmplw 4, 3
+; PPC64LE-NEXT: bge 0, .LBB331_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB331_1
+; PPC64LE-NEXT: .LBB331_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i32* %ptr, i32 %val acquire
+ ret i32 %ret
+}
+
+define i32 @test332(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test332:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB332_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB332_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB332_1
+; PPC64LE-NEXT: .LBB332_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i32* %ptr, i32 %val release
+ ret i32 %ret
+}
+
+define i32 @test333(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test333:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB333_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB333_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB333_1
+; PPC64LE-NEXT: .LBB333_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i32* %ptr, i32 %val acq_rel
+ ret i32 %ret
+}
+
+define i32 @test334(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test334:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB334_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB334_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB334_1
+; PPC64LE-NEXT: .LBB334_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i32* %ptr, i32 %val seq_cst
+ ret i32 %ret
+}
+
+define i64 @test335(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test335:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB335_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpld 4, 5
+; PPC64LE-NEXT: bge 0, .LBB335_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB335_1
+; PPC64LE-NEXT: .LBB335_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i64* %ptr, i64 %val monotonic
+ ret i64 %ret
+}
+
+define i64 @test336(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test336:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB336_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: cmpld 4, 3
+; PPC64LE-NEXT: bge 0, .LBB336_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB336_1
+; PPC64LE-NEXT: .LBB336_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i64* %ptr, i64 %val acquire
+ ret i64 %ret
+}
+
+define i64 @test337(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test337:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB337_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpld 4, 5
+; PPC64LE-NEXT: bge 0, .LBB337_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB337_1
+; PPC64LE-NEXT: .LBB337_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i64* %ptr, i64 %val release
+ ret i64 %ret
+}
+
+define i64 @test338(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test338:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB338_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpld 4, 5
+; PPC64LE-NEXT: bge 0, .LBB338_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB338_1
+; PPC64LE-NEXT: .LBB338_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i64* %ptr, i64 %val acq_rel
+ ret i64 %ret
+}
+
+define i64 @test339(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test339:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB339_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpld 4, 5
+; PPC64LE-NEXT: bge 0, .LBB339_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB339_1
+; PPC64LE-NEXT: .LBB339_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i64* %ptr, i64 %val seq_cst
+ ret i64 %ret
+}
+
+define i8 @test340(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test340:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB340_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB340_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i8* %ptr, i8 %val singlethread monotonic
+ ret i8 %ret
+}
+
+define i8 @test341(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test341:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB341_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: stbcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB341_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i8* %ptr, i8 %val singlethread acquire
+ ret i8 %ret
+}
+
+define i8 @test342(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test342:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB342_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB342_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i8* %ptr, i8 %val singlethread release
+ ret i8 %ret
+}
+
+define i8 @test343(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test343:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB343_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB343_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i8* %ptr, i8 %val singlethread acq_rel
+ ret i8 %ret
+}
+
+define i8 @test344(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test344:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB344_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB344_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i8* %ptr, i8 %val singlethread seq_cst
+ ret i8 %ret
+}
+
+define i16 @test345(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test345:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB345_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB345_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i16* %ptr, i16 %val singlethread monotonic
+ ret i16 %ret
+}
+
+define i16 @test346(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test346:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB346_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: sthcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB346_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i16* %ptr, i16 %val singlethread acquire
+ ret i16 %ret
+}
+
+define i16 @test347(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test347:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB347_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB347_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i16* %ptr, i16 %val singlethread release
+ ret i16 %ret
+}
+
+define i16 @test348(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test348:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB348_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB348_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i16* %ptr, i16 %val singlethread acq_rel
+ ret i16 %ret
+}
+
+define i16 @test349(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test349:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB349_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB349_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i16* %ptr, i16 %val singlethread seq_cst
+ ret i16 %ret
+}
+
+define i32 @test350(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test350:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB350_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB350_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i32* %ptr, i32 %val singlethread monotonic
+ ret i32 %ret
+}
+
+define i32 @test351(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test351:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB351_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: stwcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB351_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i32* %ptr, i32 %val singlethread acquire
+ ret i32 %ret
+}
+
+define i32 @test352(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test352:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB352_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB352_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i32* %ptr, i32 %val singlethread release
+ ret i32 %ret
+}
+
+define i32 @test353(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test353:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB353_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB353_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i32* %ptr, i32 %val singlethread acq_rel
+ ret i32 %ret
+}
+
+define i32 @test354(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test354:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB354_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB354_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i32* %ptr, i32 %val singlethread seq_cst
+ ret i32 %ret
+}
+
+define i64 @test355(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test355:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB355_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB355_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i64* %ptr, i64 %val singlethread monotonic
+ ret i64 %ret
+}
+
+define i64 @test356(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test356:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB356_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: stdcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB356_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i64* %ptr, i64 %val singlethread acquire
+ ret i64 %ret
+}
+
+define i64 @test357(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test357:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB357_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB357_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i64* %ptr, i64 %val singlethread release
+ ret i64 %ret
+}
+
+define i64 @test358(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test358:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB358_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB358_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i64* %ptr, i64 %val singlethread acq_rel
+ ret i64 %ret
+}
+
+define i64 @test359(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test359:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB359_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB359_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xchg i64* %ptr, i64 %val singlethread seq_cst
+ ret i64 %ret
+}
+
+define i8 @test360(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test360:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB360_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB360_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i8* %ptr, i8 %val singlethread monotonic
+ ret i8 %ret
+}
+
+define i8 @test361(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test361:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB361_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: add 6, 4, 3
+; PPC64LE-NEXT: stbcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB361_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i8* %ptr, i8 %val singlethread acquire
+ ret i8 %ret
+}
+
+define i8 @test362(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test362:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB362_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB362_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i8* %ptr, i8 %val singlethread release
+ ret i8 %ret
+}
+
+define i8 @test363(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test363:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB363_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB363_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i8* %ptr, i8 %val singlethread acq_rel
+ ret i8 %ret
+}
+
+define i8 @test364(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test364:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB364_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB364_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i8* %ptr, i8 %val singlethread seq_cst
+ ret i8 %ret
+}
+
+define i16 @test365(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test365:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB365_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB365_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i16* %ptr, i16 %val singlethread monotonic
+ ret i16 %ret
+}
+
+define i16 @test366(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test366:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB366_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: add 6, 4, 3
+; PPC64LE-NEXT: sthcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB366_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i16* %ptr, i16 %val singlethread acquire
+ ret i16 %ret
+}
+
+define i16 @test367(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test367:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB367_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB367_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i16* %ptr, i16 %val singlethread release
+ ret i16 %ret
+}
+
+define i16 @test368(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test368:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB368_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB368_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i16* %ptr, i16 %val singlethread acq_rel
+ ret i16 %ret
+}
+
+define i16 @test369(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test369:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB369_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB369_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i16* %ptr, i16 %val singlethread seq_cst
+ ret i16 %ret
+}
+
+define i32 @test370(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test370:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB370_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB370_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i32* %ptr, i32 %val singlethread monotonic
+ ret i32 %ret
+}
+
+define i32 @test371(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test371:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB371_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: add 6, 4, 3
+; PPC64LE-NEXT: stwcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB371_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i32* %ptr, i32 %val singlethread acquire
+ ret i32 %ret
+}
+
+define i32 @test372(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test372:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB372_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB372_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i32* %ptr, i32 %val singlethread release
+ ret i32 %ret
+}
+
+define i32 @test373(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test373:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB373_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB373_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i32* %ptr, i32 %val singlethread acq_rel
+ ret i32 %ret
+}
+
+define i32 @test374(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test374:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB374_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB374_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i32* %ptr, i32 %val singlethread seq_cst
+ ret i32 %ret
+}
+
+define i64 @test375(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test375:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB375_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB375_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i64* %ptr, i64 %val singlethread monotonic
+ ret i64 %ret
+}
+
+define i64 @test376(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test376:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB376_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: add 6, 4, 3
+; PPC64LE-NEXT: stdcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB376_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i64* %ptr, i64 %val singlethread acquire
+ ret i64 %ret
+}
+
+define i64 @test377(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test377:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB377_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB377_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i64* %ptr, i64 %val singlethread release
+ ret i64 %ret
+}
+
+define i64 @test378(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test378:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB378_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB378_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i64* %ptr, i64 %val singlethread acq_rel
+ ret i64 %ret
+}
+
+define i64 @test379(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test379:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB379_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: add 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB379_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw add i64* %ptr, i64 %val singlethread seq_cst
+ ret i64 %ret
+}
+
+define i8 @test380(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test380:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB380_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB380_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i8* %ptr, i8 %val singlethread monotonic
+ ret i8 %ret
+}
+
+define i8 @test381(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test381:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB381_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: subf 6, 4, 3
+; PPC64LE-NEXT: stbcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB381_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i8* %ptr, i8 %val singlethread acquire
+ ret i8 %ret
+}
+
+define i8 @test382(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test382:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB382_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB382_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i8* %ptr, i8 %val singlethread release
+ ret i8 %ret
+}
+
+define i8 @test383(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test383:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB383_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB383_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i8* %ptr, i8 %val singlethread acq_rel
+ ret i8 %ret
+}
+
+define i8 @test384(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test384:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB384_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB384_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i8* %ptr, i8 %val singlethread seq_cst
+ ret i8 %ret
+}
+
+define i16 @test385(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test385:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB385_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB385_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i16* %ptr, i16 %val singlethread monotonic
+ ret i16 %ret
+}
+
+define i16 @test386(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test386:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB386_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: subf 6, 4, 3
+; PPC64LE-NEXT: sthcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB386_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i16* %ptr, i16 %val singlethread acquire
+ ret i16 %ret
+}
+
+define i16 @test387(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test387:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB387_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB387_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i16* %ptr, i16 %val singlethread release
+ ret i16 %ret
+}
+
+define i16 @test388(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test388:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB388_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB388_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i16* %ptr, i16 %val singlethread acq_rel
+ ret i16 %ret
+}
+
+define i16 @test389(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test389:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB389_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB389_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i16* %ptr, i16 %val singlethread seq_cst
+ ret i16 %ret
+}
+
+define i32 @test390(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test390:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB390_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB390_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i32* %ptr, i32 %val singlethread monotonic
+ ret i32 %ret
+}
+
+define i32 @test391(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test391:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB391_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: subf 6, 4, 3
+; PPC64LE-NEXT: stwcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB391_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i32* %ptr, i32 %val singlethread acquire
+ ret i32 %ret
+}
+
+define i32 @test392(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test392:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB392_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB392_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i32* %ptr, i32 %val singlethread release
+ ret i32 %ret
+}
+
+define i32 @test393(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test393:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB393_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB393_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i32* %ptr, i32 %val singlethread acq_rel
+ ret i32 %ret
+}
+
+define i32 @test394(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test394:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB394_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: subf 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB394_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i32* %ptr, i32 %val singlethread seq_cst
+ ret i32 %ret
+}
+
+define i64 @test395(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test395:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB395_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: sub 6, 5, 4
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB395_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i64* %ptr, i64 %val singlethread monotonic
+ ret i64 %ret
+}
+
+define i64 @test396(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test396:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB396_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: sub 6, 3, 4
+; PPC64LE-NEXT: stdcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB396_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i64* %ptr, i64 %val singlethread acquire
+ ret i64 %ret
+}
+
+define i64 @test397(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test397:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB397_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: sub 6, 5, 4
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB397_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i64* %ptr, i64 %val singlethread release
+ ret i64 %ret
+}
+
+define i64 @test398(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test398:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB398_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: sub 6, 5, 4
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB398_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i64* %ptr, i64 %val singlethread acq_rel
+ ret i64 %ret
+}
+
+define i64 @test399(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test399:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB399_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: sub 6, 5, 4
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB399_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw sub i64* %ptr, i64 %val singlethread seq_cst
+ ret i64 %ret
+}
+
+define i8 @test400(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test400:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB400_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB400_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i8* %ptr, i8 %val singlethread monotonic
+ ret i8 %ret
+}
+
+define i8 @test401(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test401:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB401_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: and 6, 4, 3
+; PPC64LE-NEXT: stbcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB401_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i8* %ptr, i8 %val singlethread acquire
+ ret i8 %ret
+}
+
+define i8 @test402(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test402:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB402_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB402_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i8* %ptr, i8 %val singlethread release
+ ret i8 %ret
+}
+
+define i8 @test403(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test403:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB403_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB403_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i8* %ptr, i8 %val singlethread acq_rel
+ ret i8 %ret
+}
+
+define i8 @test404(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test404:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB404_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB404_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i8* %ptr, i8 %val singlethread seq_cst
+ ret i8 %ret
+}
+
+define i16 @test405(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test405:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB405_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB405_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i16* %ptr, i16 %val singlethread monotonic
+ ret i16 %ret
+}
+
+define i16 @test406(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test406:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB406_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: and 6, 4, 3
+; PPC64LE-NEXT: sthcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB406_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i16* %ptr, i16 %val singlethread acquire
+ ret i16 %ret
+}
+
+define i16 @test407(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test407:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB407_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB407_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i16* %ptr, i16 %val singlethread release
+ ret i16 %ret
+}
+
+define i16 @test408(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test408:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB408_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB408_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i16* %ptr, i16 %val singlethread acq_rel
+ ret i16 %ret
+}
+
+define i16 @test409(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test409:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB409_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB409_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i16* %ptr, i16 %val singlethread seq_cst
+ ret i16 %ret
+}
+
+define i32 @test410(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test410:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB410_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB410_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i32* %ptr, i32 %val singlethread monotonic
+ ret i32 %ret
+}
+
+define i32 @test411(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test411:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB411_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: and 6, 4, 3
+; PPC64LE-NEXT: stwcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB411_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i32* %ptr, i32 %val singlethread acquire
+ ret i32 %ret
+}
+
+define i32 @test412(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test412:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB412_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB412_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i32* %ptr, i32 %val singlethread release
+ ret i32 %ret
+}
+
+define i32 @test413(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test413:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB413_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB413_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i32* %ptr, i32 %val singlethread acq_rel
+ ret i32 %ret
+}
+
+define i32 @test414(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test414:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB414_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB414_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i32* %ptr, i32 %val singlethread seq_cst
+ ret i32 %ret
+}
+
+define i64 @test415(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test415:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB415_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB415_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i64* %ptr, i64 %val singlethread monotonic
+ ret i64 %ret
+}
+
+define i64 @test416(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test416:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB416_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: and 6, 4, 3
+; PPC64LE-NEXT: stdcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB416_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i64* %ptr, i64 %val singlethread acquire
+ ret i64 %ret
+}
+
+define i64 @test417(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test417:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB417_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB417_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i64* %ptr, i64 %val singlethread release
+ ret i64 %ret
+}
+
+define i64 @test418(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test418:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB418_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB418_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i64* %ptr, i64 %val singlethread acq_rel
+ ret i64 %ret
+}
+
+define i64 @test419(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test419:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB419_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: and 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB419_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw and i64* %ptr, i64 %val singlethread seq_cst
+ ret i64 %ret
+}
+
+define i8 @test420(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test420:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB420_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB420_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i8* %ptr, i8 %val singlethread monotonic
+ ret i8 %ret
+}
+
+define i8 @test421(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test421:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB421_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: nand 6, 4, 3
+; PPC64LE-NEXT: stbcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB421_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i8* %ptr, i8 %val singlethread acquire
+ ret i8 %ret
+}
+
+define i8 @test422(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test422:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB422_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB422_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i8* %ptr, i8 %val singlethread release
+ ret i8 %ret
+}
+
+define i8 @test423(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test423:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB423_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB423_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i8* %ptr, i8 %val singlethread acq_rel
+ ret i8 %ret
+}
+
+define i8 @test424(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test424:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB424_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB424_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i8* %ptr, i8 %val singlethread seq_cst
+ ret i8 %ret
+}
+
+define i16 @test425(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test425:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB425_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB425_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i16* %ptr, i16 %val singlethread monotonic
+ ret i16 %ret
+}
+
+define i16 @test426(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test426:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB426_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: nand 6, 4, 3
+; PPC64LE-NEXT: sthcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB426_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i16* %ptr, i16 %val singlethread acquire
+ ret i16 %ret
+}
+
+define i16 @test427(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test427:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB427_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB427_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i16* %ptr, i16 %val singlethread release
+ ret i16 %ret
+}
+
+define i16 @test428(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test428:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB428_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB428_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i16* %ptr, i16 %val singlethread acq_rel
+ ret i16 %ret
+}
+
+define i16 @test429(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test429:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB429_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB429_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i16* %ptr, i16 %val singlethread seq_cst
+ ret i16 %ret
+}
+
+define i32 @test430(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test430:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB430_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB430_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i32* %ptr, i32 %val singlethread monotonic
+ ret i32 %ret
+}
+
+define i32 @test431(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test431:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB431_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: nand 6, 4, 3
+; PPC64LE-NEXT: stwcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB431_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i32* %ptr, i32 %val singlethread acquire
+ ret i32 %ret
+}
+
+define i32 @test432(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test432:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB432_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB432_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i32* %ptr, i32 %val singlethread release
+ ret i32 %ret
+}
+
+define i32 @test433(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test433:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB433_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB433_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i32* %ptr, i32 %val singlethread acq_rel
+ ret i32 %ret
+}
+
+define i32 @test434(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test434:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB434_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB434_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i32* %ptr, i32 %val singlethread seq_cst
+ ret i32 %ret
+}
+
+define i64 @test435(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test435:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB435_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB435_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i64* %ptr, i64 %val singlethread monotonic
+ ret i64 %ret
+}
+
+define i64 @test436(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test436:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB436_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: nand 6, 4, 3
+; PPC64LE-NEXT: stdcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB436_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i64* %ptr, i64 %val singlethread acquire
+ ret i64 %ret
+}
+
+define i64 @test437(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test437:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB437_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB437_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i64* %ptr, i64 %val singlethread release
+ ret i64 %ret
+}
+
+define i64 @test438(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test438:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB438_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB438_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i64* %ptr, i64 %val singlethread acq_rel
+ ret i64 %ret
+}
+
+define i64 @test439(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test439:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB439_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: nand 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB439_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw nand i64* %ptr, i64 %val singlethread seq_cst
+ ret i64 %ret
+}
+
+define i8 @test440(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test440:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB440_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB440_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i8* %ptr, i8 %val singlethread monotonic
+ ret i8 %ret
+}
+
+define i8 @test441(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test441:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB441_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: or 6, 4, 3
+; PPC64LE-NEXT: stbcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB441_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i8* %ptr, i8 %val singlethread acquire
+ ret i8 %ret
+}
+
+define i8 @test442(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test442:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB442_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB442_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i8* %ptr, i8 %val singlethread release
+ ret i8 %ret
+}
+
+define i8 @test443(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test443:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB443_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB443_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i8* %ptr, i8 %val singlethread acq_rel
+ ret i8 %ret
+}
+
+define i8 @test444(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test444:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB444_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB444_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i8* %ptr, i8 %val singlethread seq_cst
+ ret i8 %ret
+}
+
+define i16 @test445(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test445:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB445_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB445_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i16* %ptr, i16 %val singlethread monotonic
+ ret i16 %ret
+}
+
+define i16 @test446(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test446:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB446_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: or 6, 4, 3
+; PPC64LE-NEXT: sthcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB446_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i16* %ptr, i16 %val singlethread acquire
+ ret i16 %ret
+}
+
+define i16 @test447(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test447:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB447_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB447_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i16* %ptr, i16 %val singlethread release
+ ret i16 %ret
+}
+
+define i16 @test448(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test448:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB448_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB448_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i16* %ptr, i16 %val singlethread acq_rel
+ ret i16 %ret
+}
+
+define i16 @test449(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test449:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB449_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB449_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i16* %ptr, i16 %val singlethread seq_cst
+ ret i16 %ret
+}
+
+define i32 @test450(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test450:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB450_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB450_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i32* %ptr, i32 %val singlethread monotonic
+ ret i32 %ret
+}
+
+define i32 @test451(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test451:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB451_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: or 6, 4, 3
+; PPC64LE-NEXT: stwcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB451_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i32* %ptr, i32 %val singlethread acquire
+ ret i32 %ret
+}
+
+define i32 @test452(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test452:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB452_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB452_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i32* %ptr, i32 %val singlethread release
+ ret i32 %ret
+}
+
+define i32 @test453(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test453:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB453_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB453_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i32* %ptr, i32 %val singlethread acq_rel
+ ret i32 %ret
+}
+
+define i32 @test454(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test454:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB454_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB454_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i32* %ptr, i32 %val singlethread seq_cst
+ ret i32 %ret
+}
+
+define i64 @test455(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test455:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB455_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB455_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i64* %ptr, i64 %val singlethread monotonic
+ ret i64 %ret
+}
+
+define i64 @test456(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test456:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB456_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: or 6, 4, 3
+; PPC64LE-NEXT: stdcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB456_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i64* %ptr, i64 %val singlethread acquire
+ ret i64 %ret
+}
+
+define i64 @test457(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test457:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB457_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB457_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i64* %ptr, i64 %val singlethread release
+ ret i64 %ret
+}
+
+define i64 @test458(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test458:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB458_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB458_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i64* %ptr, i64 %val singlethread acq_rel
+ ret i64 %ret
+}
+
+define i64 @test459(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test459:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB459_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: or 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB459_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw or i64* %ptr, i64 %val singlethread seq_cst
+ ret i64 %ret
+}
+
+define i8 @test460(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test460:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB460_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB460_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i8* %ptr, i8 %val singlethread monotonic
+ ret i8 %ret
+}
+
+define i8 @test461(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test461:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB461_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: xor 6, 4, 3
+; PPC64LE-NEXT: stbcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB461_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i8* %ptr, i8 %val singlethread acquire
+ ret i8 %ret
+}
+
+define i8 @test462(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test462:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB462_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB462_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i8* %ptr, i8 %val singlethread release
+ ret i8 %ret
+}
+
+define i8 @test463(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test463:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB463_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB463_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i8* %ptr, i8 %val singlethread acq_rel
+ ret i8 %ret
+}
+
+define i8 @test464(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test464:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB464_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stbcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB464_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i8* %ptr, i8 %val singlethread seq_cst
+ ret i8 %ret
+}
+
+define i16 @test465(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test465:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB465_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB465_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i16* %ptr, i16 %val singlethread monotonic
+ ret i16 %ret
+}
+
+define i16 @test466(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test466:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB466_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: xor 6, 4, 3
+; PPC64LE-NEXT: sthcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB466_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i16* %ptr, i16 %val singlethread acquire
+ ret i16 %ret
+}
+
+define i16 @test467(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test467:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB467_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB467_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i16* %ptr, i16 %val singlethread release
+ ret i16 %ret
+}
+
+define i16 @test468(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test468:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB468_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB468_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i16* %ptr, i16 %val singlethread acq_rel
+ ret i16 %ret
+}
+
+define i16 @test469(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test469:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB469_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: sthcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB469_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i16* %ptr, i16 %val singlethread seq_cst
+ ret i16 %ret
+}
+
+define i32 @test470(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test470:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB470_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB470_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i32* %ptr, i32 %val singlethread monotonic
+ ret i32 %ret
+}
+
+define i32 @test471(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test471:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB471_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: xor 6, 4, 3
+; PPC64LE-NEXT: stwcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB471_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i32* %ptr, i32 %val singlethread acquire
+ ret i32 %ret
+}
+
+define i32 @test472(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test472:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB472_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB472_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i32* %ptr, i32 %val singlethread release
+ ret i32 %ret
+}
+
+define i32 @test473(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test473:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB473_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB473_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i32* %ptr, i32 %val singlethread acq_rel
+ ret i32 %ret
+}
+
+define i32 @test474(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test474:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB474_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stwcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB474_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i32* %ptr, i32 %val singlethread seq_cst
+ ret i32 %ret
+}
+
+define i64 @test475(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test475:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB475_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB475_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i64* %ptr, i64 %val singlethread monotonic
+ ret i64 %ret
+}
+
+define i64 @test476(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test476:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB476_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: xor 6, 4, 3
+; PPC64LE-NEXT: stdcx. 6, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB476_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i64* %ptr, i64 %val singlethread acquire
+ ret i64 %ret
+}
+
+define i64 @test477(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test477:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB477_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB477_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i64* %ptr, i64 %val singlethread release
+ ret i64 %ret
+}
+
+define i64 @test478(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test478:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB478_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB478_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i64* %ptr, i64 %val singlethread acq_rel
+ ret i64 %ret
+}
+
+define i64 @test479(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test479:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB479_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: xor 6, 4, 5
+; PPC64LE-NEXT: stdcx. 6, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB479_1
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw xor i64* %ptr, i64 %val singlethread seq_cst
+ ret i64 %ret
+}
+
+define i8 @test480(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test480:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB480_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: extsb 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB480_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB480_1
+; PPC64LE-NEXT: .LBB480_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i8* %ptr, i8 %val singlethread monotonic
+ ret i8 %ret
+}
+
+define i8 @test481(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test481:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB481_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: extsb 6, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB481_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB481_1
+; PPC64LE-NEXT: .LBB481_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i8* %ptr, i8 %val singlethread acquire
+ ret i8 %ret
+}
+
+define i8 @test482(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test482:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB482_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: extsb 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB482_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB482_1
+; PPC64LE-NEXT: .LBB482_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i8* %ptr, i8 %val singlethread release
+ ret i8 %ret
+}
+
+define i8 @test483(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test483:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB483_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: extsb 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB483_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB483_1
+; PPC64LE-NEXT: .LBB483_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i8* %ptr, i8 %val singlethread acq_rel
+ ret i8 %ret
+}
+
+define i8 @test484(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test484:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB484_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: extsb 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB484_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB484_1
+; PPC64LE-NEXT: .LBB484_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i8* %ptr, i8 %val singlethread seq_cst
+ ret i8 %ret
+}
+
+define i16 @test485(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test485:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB485_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: extsh 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB485_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB485_1
+; PPC64LE-NEXT: .LBB485_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i16* %ptr, i16 %val singlethread monotonic
+ ret i16 %ret
+}
+
+define i16 @test486(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test486:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB486_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: extsh 6, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB486_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB486_1
+; PPC64LE-NEXT: .LBB486_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i16* %ptr, i16 %val singlethread acquire
+ ret i16 %ret
+}
+
+define i16 @test487(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test487:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB487_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: extsh 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB487_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB487_1
+; PPC64LE-NEXT: .LBB487_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i16* %ptr, i16 %val singlethread release
+ ret i16 %ret
+}
+
+define i16 @test488(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test488:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB488_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: extsh 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB488_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB488_1
+; PPC64LE-NEXT: .LBB488_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i16* %ptr, i16 %val singlethread acq_rel
+ ret i16 %ret
+}
+
+define i16 @test489(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test489:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB489_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: extsh 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: ble 0, .LBB489_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB489_1
+; PPC64LE-NEXT: .LBB489_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i16* %ptr, i16 %val singlethread seq_cst
+ ret i16 %ret
+}
+
+define i32 @test490(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test490:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB490_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmpw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB490_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB490_1
+; PPC64LE-NEXT: .LBB490_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i32* %ptr, i32 %val singlethread monotonic
+ ret i32 %ret
+}
+
+define i32 @test491(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test491:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB491_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: cmpw 4, 3
+; PPC64LE-NEXT: ble 0, .LBB491_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB491_1
+; PPC64LE-NEXT: .LBB491_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i32* %ptr, i32 %val singlethread acquire
+ ret i32 %ret
+}
+
+define i32 @test492(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test492:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB492_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmpw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB492_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB492_1
+; PPC64LE-NEXT: .LBB492_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i32* %ptr, i32 %val singlethread release
+ ret i32 %ret
+}
+
+define i32 @test493(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test493:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB493_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmpw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB493_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB493_1
+; PPC64LE-NEXT: .LBB493_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i32* %ptr, i32 %val singlethread acq_rel
+ ret i32 %ret
+}
+
+define i32 @test494(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test494:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB494_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmpw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB494_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB494_1
+; PPC64LE-NEXT: .LBB494_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i32* %ptr, i32 %val singlethread seq_cst
+ ret i32 %ret
+}
+
+define i64 @test495(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test495:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB495_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpd 4, 5
+; PPC64LE-NEXT: ble 0, .LBB495_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB495_1
+; PPC64LE-NEXT: .LBB495_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i64* %ptr, i64 %val singlethread monotonic
+ ret i64 %ret
+}
+
+define i64 @test496(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test496:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB496_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: cmpd 4, 3
+; PPC64LE-NEXT: ble 0, .LBB496_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB496_1
+; PPC64LE-NEXT: .LBB496_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i64* %ptr, i64 %val singlethread acquire
+ ret i64 %ret
+}
+
+define i64 @test497(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test497:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB497_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpd 4, 5
+; PPC64LE-NEXT: ble 0, .LBB497_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB497_1
+; PPC64LE-NEXT: .LBB497_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i64* %ptr, i64 %val singlethread release
+ ret i64 %ret
+}
+
+define i64 @test498(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test498:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB498_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpd 4, 5
+; PPC64LE-NEXT: ble 0, .LBB498_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB498_1
+; PPC64LE-NEXT: .LBB498_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i64* %ptr, i64 %val singlethread acq_rel
+ ret i64 %ret
+}
+
+define i64 @test499(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test499:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB499_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpd 4, 5
+; PPC64LE-NEXT: ble 0, .LBB499_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB499_1
+; PPC64LE-NEXT: .LBB499_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw max i64* %ptr, i64 %val singlethread seq_cst
+ ret i64 %ret
+}
+
+define i8 @test500(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test500:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB500_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: extsb 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB500_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB500_1
+; PPC64LE-NEXT: .LBB500_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i8* %ptr, i8 %val singlethread monotonic
+ ret i8 %ret
+}
+
+define i8 @test501(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test501:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB501_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: extsb 6, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB501_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB501_1
+; PPC64LE-NEXT: .LBB501_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i8* %ptr, i8 %val singlethread acquire
+ ret i8 %ret
+}
+
+define i8 @test502(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test502:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB502_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: extsb 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB502_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB502_1
+; PPC64LE-NEXT: .LBB502_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i8* %ptr, i8 %val singlethread release
+ ret i8 %ret
+}
+
+define i8 @test503(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test503:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB503_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: extsb 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB503_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB503_1
+; PPC64LE-NEXT: .LBB503_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i8* %ptr, i8 %val singlethread acq_rel
+ ret i8 %ret
+}
+
+define i8 @test504(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test504:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB504_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: extsb 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB504_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB504_1
+; PPC64LE-NEXT: .LBB504_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i8* %ptr, i8 %val singlethread seq_cst
+ ret i8 %ret
+}
+
+define i16 @test505(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test505:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB505_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: extsh 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB505_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB505_1
+; PPC64LE-NEXT: .LBB505_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i16* %ptr, i16 %val singlethread monotonic
+ ret i16 %ret
+}
+
+define i16 @test506(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test506:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB506_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: extsh 6, 3
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB506_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB506_1
+; PPC64LE-NEXT: .LBB506_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i16* %ptr, i16 %val singlethread acquire
+ ret i16 %ret
+}
+
+define i16 @test507(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test507:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB507_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: extsh 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB507_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB507_1
+; PPC64LE-NEXT: .LBB507_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i16* %ptr, i16 %val singlethread release
+ ret i16 %ret
+}
+
+define i16 @test508(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test508:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB508_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: extsh 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB508_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB508_1
+; PPC64LE-NEXT: .LBB508_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i16* %ptr, i16 %val singlethread acq_rel
+ ret i16 %ret
+}
+
+define i16 @test509(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test509:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB509_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: extsh 6, 5
+; PPC64LE-NEXT: cmpw 4, 6
+; PPC64LE-NEXT: bge 0, .LBB509_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB509_1
+; PPC64LE-NEXT: .LBB509_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i16* %ptr, i16 %val singlethread seq_cst
+ ret i16 %ret
+}
+
+define i32 @test510(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test510:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB510_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmpw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB510_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB510_1
+; PPC64LE-NEXT: .LBB510_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i32* %ptr, i32 %val singlethread monotonic
+ ret i32 %ret
+}
+
+define i32 @test511(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test511:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB511_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: cmpw 4, 3
+; PPC64LE-NEXT: bge 0, .LBB511_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB511_1
+; PPC64LE-NEXT: .LBB511_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i32* %ptr, i32 %val singlethread acquire
+ ret i32 %ret
+}
+
+define i32 @test512(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test512:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB512_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmpw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB512_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB512_1
+; PPC64LE-NEXT: .LBB512_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i32* %ptr, i32 %val singlethread release
+ ret i32 %ret
+}
+
+define i32 @test513(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test513:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB513_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmpw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB513_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB513_1
+; PPC64LE-NEXT: .LBB513_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i32* %ptr, i32 %val singlethread acq_rel
+ ret i32 %ret
+}
+
+define i32 @test514(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test514:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB514_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmpw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB514_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB514_1
+; PPC64LE-NEXT: .LBB514_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i32* %ptr, i32 %val singlethread seq_cst
+ ret i32 %ret
+}
+
+define i64 @test515(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test515:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB515_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpd 4, 5
+; PPC64LE-NEXT: bge 0, .LBB515_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB515_1
+; PPC64LE-NEXT: .LBB515_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i64* %ptr, i64 %val singlethread monotonic
+ ret i64 %ret
+}
+
+define i64 @test516(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test516:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB516_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: cmpd 4, 3
+; PPC64LE-NEXT: bge 0, .LBB516_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB516_1
+; PPC64LE-NEXT: .LBB516_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i64* %ptr, i64 %val singlethread acquire
+ ret i64 %ret
+}
+
+define i64 @test517(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test517:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB517_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpd 4, 5
+; PPC64LE-NEXT: bge 0, .LBB517_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB517_1
+; PPC64LE-NEXT: .LBB517_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i64* %ptr, i64 %val singlethread release
+ ret i64 %ret
+}
+
+define i64 @test518(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test518:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB518_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpd 4, 5
+; PPC64LE-NEXT: bge 0, .LBB518_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB518_1
+; PPC64LE-NEXT: .LBB518_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i64* %ptr, i64 %val singlethread acq_rel
+ ret i64 %ret
+}
+
+define i64 @test519(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test519:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB519_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpd 4, 5
+; PPC64LE-NEXT: bge 0, .LBB519_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB519_1
+; PPC64LE-NEXT: .LBB519_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw min i64* %ptr, i64 %val singlethread seq_cst
+ ret i64 %ret
+}
+
+define i8 @test520(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test520:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB520_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB520_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB520_1
+; PPC64LE-NEXT: .LBB520_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i8* %ptr, i8 %val singlethread monotonic
+ ret i8 %ret
+}
+
+define i8 @test521(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test521:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB521_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: cmplw 4, 3
+; PPC64LE-NEXT: ble 0, .LBB521_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB521_1
+; PPC64LE-NEXT: .LBB521_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i8* %ptr, i8 %val singlethread acquire
+ ret i8 %ret
+}
+
+define i8 @test522(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test522:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB522_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB522_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB522_1
+; PPC64LE-NEXT: .LBB522_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i8* %ptr, i8 %val singlethread release
+ ret i8 %ret
+}
+
+define i8 @test523(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test523:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB523_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB523_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB523_1
+; PPC64LE-NEXT: .LBB523_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i8* %ptr, i8 %val singlethread acq_rel
+ ret i8 %ret
+}
+
+define i8 @test524(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test524:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB524_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB524_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB524_1
+; PPC64LE-NEXT: .LBB524_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i8* %ptr, i8 %val singlethread seq_cst
+ ret i8 %ret
+}
+
+define i16 @test525(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test525:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB525_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB525_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB525_1
+; PPC64LE-NEXT: .LBB525_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i16* %ptr, i16 %val singlethread monotonic
+ ret i16 %ret
+}
+
+define i16 @test526(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test526:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB526_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: cmplw 4, 3
+; PPC64LE-NEXT: ble 0, .LBB526_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB526_1
+; PPC64LE-NEXT: .LBB526_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i16* %ptr, i16 %val singlethread acquire
+ ret i16 %ret
+}
+
+define i16 @test527(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test527:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB527_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB527_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB527_1
+; PPC64LE-NEXT: .LBB527_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i16* %ptr, i16 %val singlethread release
+ ret i16 %ret
+}
+
+define i16 @test528(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test528:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB528_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB528_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB528_1
+; PPC64LE-NEXT: .LBB528_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i16* %ptr, i16 %val singlethread acq_rel
+ ret i16 %ret
+}
+
+define i16 @test529(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test529:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB529_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB529_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB529_1
+; PPC64LE-NEXT: .LBB529_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i16* %ptr, i16 %val singlethread seq_cst
+ ret i16 %ret
+}
+
+define i32 @test530(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test530:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB530_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB530_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB530_1
+; PPC64LE-NEXT: .LBB530_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i32* %ptr, i32 %val singlethread monotonic
+ ret i32 %ret
+}
+
+define i32 @test531(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test531:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB531_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: cmplw 4, 3
+; PPC64LE-NEXT: ble 0, .LBB531_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB531_1
+; PPC64LE-NEXT: .LBB531_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i32* %ptr, i32 %val singlethread acquire
+ ret i32 %ret
+}
+
+define i32 @test532(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test532:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB532_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB532_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB532_1
+; PPC64LE-NEXT: .LBB532_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i32* %ptr, i32 %val singlethread release
+ ret i32 %ret
+}
+
+define i32 @test533(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test533:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB533_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB533_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB533_1
+; PPC64LE-NEXT: .LBB533_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i32* %ptr, i32 %val singlethread acq_rel
+ ret i32 %ret
+}
+
+define i32 @test534(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test534:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB534_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: ble 0, .LBB534_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB534_1
+; PPC64LE-NEXT: .LBB534_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i32* %ptr, i32 %val singlethread seq_cst
+ ret i32 %ret
+}
+
+define i64 @test535(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test535:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB535_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpld 4, 5
+; PPC64LE-NEXT: ble 0, .LBB535_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB535_1
+; PPC64LE-NEXT: .LBB535_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i64* %ptr, i64 %val singlethread monotonic
+ ret i64 %ret
+}
+
+define i64 @test536(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test536:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB536_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: cmpld 4, 3
+; PPC64LE-NEXT: ble 0, .LBB536_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB536_1
+; PPC64LE-NEXT: .LBB536_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i64* %ptr, i64 %val singlethread acquire
+ ret i64 %ret
+}
+
+define i64 @test537(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test537:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB537_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpld 4, 5
+; PPC64LE-NEXT: ble 0, .LBB537_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB537_1
+; PPC64LE-NEXT: .LBB537_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i64* %ptr, i64 %val singlethread release
+ ret i64 %ret
+}
+
+define i64 @test538(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test538:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB538_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpld 4, 5
+; PPC64LE-NEXT: ble 0, .LBB538_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB538_1
+; PPC64LE-NEXT: .LBB538_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i64* %ptr, i64 %val singlethread acq_rel
+ ret i64 %ret
+}
+
+define i64 @test539(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test539:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB539_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpld 4, 5
+; PPC64LE-NEXT: ble 0, .LBB539_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB539_1
+; PPC64LE-NEXT: .LBB539_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umax i64* %ptr, i64 %val singlethread seq_cst
+ ret i64 %ret
+}
+
+define i8 @test540(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test540:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB540_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB540_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB540_1
+; PPC64LE-NEXT: .LBB540_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i8* %ptr, i8 %val singlethread monotonic
+ ret i8 %ret
+}
+
+define i8 @test541(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test541:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB541_1:
+; PPC64LE-NEXT: lbarx 3, 0, 5
+; PPC64LE-NEXT: cmplw 4, 3
+; PPC64LE-NEXT: bge 0, .LBB541_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB541_1
+; PPC64LE-NEXT: .LBB541_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i8* %ptr, i8 %val singlethread acquire
+ ret i8 %ret
+}
+
+define i8 @test542(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test542:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB542_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB542_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB542_1
+; PPC64LE-NEXT: .LBB542_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i8* %ptr, i8 %val singlethread release
+ ret i8 %ret
+}
+
+define i8 @test543(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test543:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB543_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB543_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB543_1
+; PPC64LE-NEXT: .LBB543_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i8* %ptr, i8 %val singlethread acq_rel
+ ret i8 %ret
+}
+
+define i8 @test544(i8* %ptr, i8 %val) {
+; PPC64LE-LABEL: test544:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB544_1:
+; PPC64LE-NEXT: lbarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB544_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stbcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB544_1
+; PPC64LE-NEXT: .LBB544_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i8* %ptr, i8 %val singlethread seq_cst
+ ret i8 %ret
+}
+
+define i16 @test545(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test545:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB545_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB545_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB545_1
+; PPC64LE-NEXT: .LBB545_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i16* %ptr, i16 %val singlethread monotonic
+ ret i16 %ret
+}
+
+define i16 @test546(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test546:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB546_1:
+; PPC64LE-NEXT: lharx 3, 0, 5
+; PPC64LE-NEXT: cmplw 4, 3
+; PPC64LE-NEXT: bge 0, .LBB546_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB546_1
+; PPC64LE-NEXT: .LBB546_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i16* %ptr, i16 %val singlethread acquire
+ ret i16 %ret
+}
+
+define i16 @test547(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test547:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB547_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB547_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB547_1
+; PPC64LE-NEXT: .LBB547_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i16* %ptr, i16 %val singlethread release
+ ret i16 %ret
+}
+
+define i16 @test548(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test548:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB548_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB548_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB548_1
+; PPC64LE-NEXT: .LBB548_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i16* %ptr, i16 %val singlethread acq_rel
+ ret i16 %ret
+}
+
+define i16 @test549(i16* %ptr, i16 %val) {
+; PPC64LE-LABEL: test549:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB549_1:
+; PPC64LE-NEXT: lharx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB549_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: sthcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB549_1
+; PPC64LE-NEXT: .LBB549_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i16* %ptr, i16 %val singlethread seq_cst
+ ret i16 %ret
+}
+
+define i32 @test550(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test550:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB550_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB550_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB550_1
+; PPC64LE-NEXT: .LBB550_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i32* %ptr, i32 %val singlethread monotonic
+ ret i32 %ret
+}
+
+define i32 @test551(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test551:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB551_1:
+; PPC64LE-NEXT: lwarx 3, 0, 5
+; PPC64LE-NEXT: cmplw 4, 3
+; PPC64LE-NEXT: bge 0, .LBB551_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB551_1
+; PPC64LE-NEXT: .LBB551_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i32* %ptr, i32 %val singlethread acquire
+ ret i32 %ret
+}
+
+define i32 @test552(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test552:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB552_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB552_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB552_1
+; PPC64LE-NEXT: .LBB552_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i32* %ptr, i32 %val singlethread release
+ ret i32 %ret
+}
+
+define i32 @test553(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test553:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB553_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB553_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB553_1
+; PPC64LE-NEXT: .LBB553_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i32* %ptr, i32 %val singlethread acq_rel
+ ret i32 %ret
+}
+
+define i32 @test554(i32* %ptr, i32 %val) {
+; PPC64LE-LABEL: test554:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB554_1:
+; PPC64LE-NEXT: lwarx 5, 0, 3
+; PPC64LE-NEXT: cmplw 4, 5
+; PPC64LE-NEXT: bge 0, .LBB554_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stwcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB554_1
+; PPC64LE-NEXT: .LBB554_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i32* %ptr, i32 %val singlethread seq_cst
+ ret i32 %ret
+}
+
+define i64 @test555(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test555:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: .LBB555_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpld 4, 5
+; PPC64LE-NEXT: bge 0, .LBB555_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB555_1
+; PPC64LE-NEXT: .LBB555_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i64* %ptr, i64 %val singlethread monotonic
+ ret i64 %ret
+}
+
+define i64 @test556(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test556:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: mr 5, 3
+; PPC64LE-NEXT: .LBB556_1:
+; PPC64LE-NEXT: ldarx 3, 0, 5
+; PPC64LE-NEXT: cmpld 4, 3
+; PPC64LE-NEXT: bge 0, .LBB556_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 5
+; PPC64LE-NEXT: bne 0, .LBB556_1
+; PPC64LE-NEXT: .LBB556_3:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i64* %ptr, i64 %val singlethread acquire
+ ret i64 %ret
+}
+
+define i64 @test557(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test557:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB557_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpld 4, 5
+; PPC64LE-NEXT: bge 0, .LBB557_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB557_1
+; PPC64LE-NEXT: .LBB557_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i64* %ptr, i64 %val singlethread release
+ ret i64 %ret
+}
+
+define i64 @test558(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test558:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: .LBB558_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpld 4, 5
+; PPC64LE-NEXT: bge 0, .LBB558_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB558_1
+; PPC64LE-NEXT: .LBB558_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i64* %ptr, i64 %val singlethread acq_rel
+ ret i64 %ret
+}
+
+define i64 @test559(i64* %ptr, i64 %val) {
+; PPC64LE-LABEL: test559:
+; PPC64LE: # BB#0:
+; PPC64LE-NEXT: sync
+; PPC64LE-NEXT: .LBB559_1:
+; PPC64LE-NEXT: ldarx 5, 0, 3
+; PPC64LE-NEXT: cmpld 4, 5
+; PPC64LE-NEXT: bge 0, .LBB559_3
+; PPC64LE-NEXT: # BB#2:
+; PPC64LE-NEXT: stdcx. 4, 0, 3
+; PPC64LE-NEXT: bne 0, .LBB559_1
+; PPC64LE-NEXT: .LBB559_3:
+; PPC64LE-NEXT: mr 3, 5
+; PPC64LE-NEXT: lwsync
+; PPC64LE-NEXT: blr
+ %ret = atomicrmw umin i64* %ptr, i64 %val singlethread seq_cst
+ ret i64 %ret
+}
diff --git a/test/CodeGen/PowerPC/bitcasts-direct-move.ll b/test/CodeGen/PowerPC/bitcasts-direct-move.ll
index 79da5cb68740..d6c7dd3804ff 100644
--- a/test/CodeGen/PowerPC/bitcasts-direct-move.ll
+++ b/test/CodeGen/PowerPC/bitcasts-direct-move.ll
@@ -20,7 +20,7 @@ entry:
ret i64 %0
; CHECK-P7: stxsdx 1,
; CHECK-P7: ld 3,
-; CHECK: mfvsrd 3, 1
+; CHECK: mffprd 3, 1
}
define float @i32tof32(i32 signext %a) {
@@ -60,7 +60,7 @@ entry:
ret i64 %0
; CHECK-P7: stxsdx 1,
; CHECK-P7: ld 3,
-; CHECK: mfvsrd 3, 1
+; CHECK: mffprd 3, 1
}
define float @i32utof32(i32 zeroext %a) {
diff --git a/test/CodeGen/PowerPC/branch_coalesce.ll b/test/CodeGen/PowerPC/branch_coalesce.ll
new file mode 100644
index 000000000000..deb6d898c2e0
--- /dev/null
+++ b/test/CodeGen/PowerPC/branch_coalesce.ll
@@ -0,0 +1,31 @@
+; RUN: llc -mcpu=pwr8 -mtriple=powerpc64le-unknown-linux-gnu -verify-machineinstrs -enable-branch-coalesce=true < %s | FileCheck %s
+; RUN: llc -mcpu=pwr8 -mtriple=powerpc64-unknown-linux-gnu -verify-machineinstrs -enable-branch-coalesce=true < %s | FileCheck %s
+
+; Function Attrs: nounwind
+define double @testBranchCoal(double %a, double %b, double %c, i32 %x) {
+entry:
+ %test = icmp eq i32 %x, 0
+ %tmp1 = select i1 %test, double %a, double 2.000000e-03
+ %tmp2 = select i1 %test, double %b, double 0.000000e+00
+ %tmp3 = select i1 %test, double %c, double 5.000000e-03
+
+ %res1 = fadd double %tmp1, %tmp2
+ %result = fadd double %res1, %tmp3
+ ret double %result
+
+; CHECK-LABEL: @testBranchCoal
+; CHECK: cmplwi [[CMPR:[0-7]+]], 6, 0
+; CHECK: beq [[CMPR]], .LBB[[LAB1:[0-9_]+]]
+; CHECK-DAG: addis [[LD1REG:[0-9]+]], 2, .LCPI0_0@toc@ha
+; CHECK-DAG: addis [[LD2REG:[0-9]+]], 2, .LCPI0_1@toc@ha
+; CHECK-DAG: xxlxor 2, 2, 2
+; CHECK-NOT: beq
+; CHECK-DAG: addi [[LD1BASE:[0-9]+]], [[LD1REG]]
+; CHECK-DAG: addi [[LD2BASE:[0-9]+]], [[LD2REG]]
+; CHECK-DAG: lxsdx 1, 0, [[LD1BASE]]
+; CHECK-DAG: lxsdx 3, 0, [[LD2BASE]]
+; CHECK: .LBB[[LAB1]]
+; CHECK: xsadddp 0, 1, 2
+; CHECK: xsadddp 1, 0, 3
+; CHECK: blr
+}
diff --git a/test/CodeGen/PowerPC/complex-return.ll b/test/CodeGen/PowerPC/complex-return.ll
index f6097e655128..ec87a89b1108 100644
--- a/test/CodeGen/PowerPC/complex-return.ll
+++ b/test/CodeGen/PowerPC/complex-return.ll
@@ -24,10 +24,10 @@ entry:
}
; CHECK-LABEL: foo:
-; CHECK: lfd 1
-; CHECK: lfd 2
-; CHECK: lfd 3
-; CHECK: lfd 4
+; CHECK-DAG: lfd 1
+; CHECK-DAG: lfd 2
+; CHECK-DAG: lfd 3
+; CHECK-DAG: lfd 4
define { float, float } @oof() nounwind {
entry:
@@ -50,6 +50,6 @@ entry:
}
; CHECK-LABEL: oof:
-; CHECK: lfs 2
-; CHECK: lfs 1
+; CHECK-DAG: lfs 2
+; CHECK-DAG: lfs 1
diff --git a/test/CodeGen/PowerPC/crbit-asm.ll b/test/CodeGen/PowerPC/crbit-asm.ll
index 11999670bd6a..c156d3bcc087 100644
--- a/test/CodeGen/PowerPC/crbit-asm.ll
+++ b/test/CodeGen/PowerPC/crbit-asm.ll
@@ -1,5 +1,8 @@
; RUN: llc -verify-machineinstrs -mcpu=pwr7 < %s | FileCheck %s
; RUN: llc -verify-machineinstrs -O1 -mcpu=pwr7 < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-gen-isel=false < %s | FileCheck --check-prefix=CHECK-NO-ISEL %s
+; RUN: llc -verify-machineinstrs -O1 -mcpu=pwr7 -ppc-gen-isel=false < %s | FileCheck --check-prefix=CHECK-NO-ISEL %s
+
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
@@ -18,6 +21,10 @@ entry:
; CHECK-DAG: crand [[REG3:[0-9]+]], [[REG2]], 1
; CHECK-DAG: li [[REG4:[0-9]+]], 1
; CHECK: isel 3, [[REG4]], [[REG1]], [[REG3]]
+; CHECK-NO-ISEL-LABEL: @testi1
+; CHECK-NO-ISEL: bclr 12, 20, 0
+; CHECK-NO-ISEL: ori 3, 5, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
diff --git a/test/CodeGen/PowerPC/crbits.ll b/test/CodeGen/PowerPC/crbits.ll
index 97f02ef31b3e..a85237195c5e 100644
--- a/test/CodeGen/PowerPC/crbits.ll
+++ b/test/CodeGen/PowerPC/crbits.ll
@@ -1,4 +1,5 @@
; RUN: llc -verify-machineinstrs -mcpu=pwr7 < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-gen-isel=false < %s | FileCheck --check-prefix=CHECK-NO-ISEL %s
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
@@ -19,6 +20,12 @@ entry:
; CHECK: crnor
; CHECK: crnand [[REG4:[0-9]+]],
; CHECK: isel 3, 0, [[REG1]], [[REG4]]
+; CHECK-NO-ISEL-LABEL: @test1
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL-NEXT: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 0, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
@@ -135,7 +142,7 @@ entry:
ret i32 %cond
; CHECK-LABEL: @exttest7
-; CHECK-DAG: cmplwi {{[0-9]+}}, 3, 5
+; CHECK-DAG: cmpwi {{[0-9]+}}, 3, 5
; CHECK-DAG: li [[REG1:[0-9]+]], 8
; CHECK-DAG: li [[REG2:[0-9]+]], 7
; CHECK: isel 3, [[REG2]], [[REG1]],
diff --git a/test/CodeGen/PowerPC/ctrloop-i128.ll b/test/CodeGen/PowerPC/ctrloop-i128.ll
new file mode 100644
index 000000000000..8c1e0c160d30
--- /dev/null
+++ b/test/CodeGen/PowerPC/ctrloop-i128.ll
@@ -0,0 +1,34 @@
+; RUN: llc -O1 -verify-machineinstrs < %s
+target datalayout = "E-m:e-i64:64-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+
+; Function Attrs: uwtable
+define fastcc void @_Crash_Fn() unnamed_addr #0 {
+entry-block:
+ br label %_Label_0
+
+_Label_0: ; preds = %_Label_0, %entry-block
+ %result.0138 = phi i128 [ %5, %_Label_0 ], [ 0, %entry-block ]
+ %iter.sroa.0.0137 = phi i8* [ %0, %_Label_0 ], [ undef, %entry-block ]
+ %0 = getelementptr inbounds i8, i8* %iter.sroa.0.0137, i64 1
+ %1 = tail call { i128, i1 } @llvm.smul.with.overflow.i128(i128 %result.0138, i128 undef) #2
+ %2 = extractvalue { i128, i1 } %1, 0
+ %3 = tail call { i128, i1 } @llvm.sadd.with.overflow.i128(i128 %2, i128 0) #2
+ %4 = extractvalue { i128, i1 } %3, 1
+ %5 = extractvalue { i128, i1 } %3, 0
+ %6 = icmp eq i8* %0, null
+ br i1 %6, label %bb66.loopexit, label %_Label_0
+
+bb66.loopexit: ; preds = %_Label_0
+ unreachable
+}
+
+; Function Attrs: nounwind readnone
+declare { i128, i1 } @llvm.sadd.with.overflow.i128(i128, i128) #1
+
+; Function Attrs: nounwind readnone
+declare { i128, i1 } @llvm.smul.with.overflow.i128(i128, i128) #1
+
+attributes #0 = { uwtable }
+attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind }
diff --git a/test/CodeGen/PowerPC/ctrloop-intrin.ll b/test/CodeGen/PowerPC/ctrloop-intrin.ll
index 3a6e8855971b..6ae5d3368c1a 100644
--- a/test/CodeGen/PowerPC/ctrloop-intrin.ll
+++ b/test/CodeGen/PowerPC/ctrloop-intrin.ll
@@ -17,10 +17,10 @@ target triple = "powerpc64le--linux-gnu"
@.str.11.98 = external hidden unnamed_addr constant [3 x i8], align 1
; Function Attrs: nounwind
-declare void @llvm.lifetime.start(i64, i8* nocapture) #0
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #0
; Function Attrs: nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture) #0
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #0
; Function Attrs: nounwind
declare i8* @halide_string_to_string(i8*, i8*, i8*) #1
@@ -36,7 +36,7 @@ entry:
%buf = alloca [512 x i8], align 1
store double %arg, double* %arg.addr, align 8, !tbaa !4
%0 = bitcast i64* %bits to i8*
- call void @llvm.lifetime.start(i64 8, i8* %0) #0
+ call void @llvm.lifetime.start.p0i8(i64 8, i8* %0) #0
store i64 0, i64* %bits, align 8, !tbaa !8
%1 = bitcast double* %arg.addr to i8*
%call = call i8* @memcpy(i8* %0, i8* %1, i64 8) #2
@@ -245,7 +245,7 @@ if.end.105: ; preds = %if.end.84, %if.end.
%integer_exponent.0 = phi i32 [ 0, %if.end.84 ], [ %sub70, %if.end.66 ]
%fractional_part.2 = phi i64 [ %.fractional_part.0, %if.end.84 ], [ 0, %if.end.66 ]
%7 = bitcast [512 x i8]* %buf to i8*
- call void @llvm.lifetime.start(i64 512, i8* %7) #0
+ call void @llvm.lifetime.start.p0i8(i64 512, i8* %7) #0
%add.ptr = getelementptr inbounds [512 x i8], [512 x i8]* %buf, i64 0, i64 512
%add.ptr106 = getelementptr inbounds [512 x i8], [512 x i8]* %buf, i64 0, i64 480
%call109 = call i8* @halide_int64_to_string(i8* %add.ptr106, i8* %add.ptr, i64 %integer_part.2, i32 1) #3
@@ -272,7 +272,7 @@ for.cond.cleanup: ; preds = %if.end.138, %if.end
%call142 = call i8* @halide_string_to_string(i8* %dst.addr.0, i8* %end, i8* %int_part_ptr.0.lcssa) #3
%call143 = call i8* @halide_string_to_string(i8* %call142, i8* %end, i8* getelementptr inbounds ([2 x i8], [2 x i8]* @.str.9.96, i64 0, i64 0)) #3
%call144 = call i8* @halide_int64_to_string(i8* %call143, i8* %end, i64 %fractional_part.2, i32 6) #3
- call void @llvm.lifetime.end(i64 512, i8* %9) #0
+ call void @llvm.lifetime.end.p0i8(i64 512, i8* %9) #0
br label %cleanup.148
for.cond.cleanup.115: ; preds = %for.body.116
@@ -315,7 +315,7 @@ if.end.138: ; preds = %if.then.136, %for.c
cleanup.148: ; preds = %for.cond.cleanup, %if.then.64, %if.end.59, %if.else.30, %if.then.28, %if.else.24, %if.then.22, %if.else.13, %if.then.11, %if.else, %if.then.6
%retval.1 = phi i8* [ %call7, %if.then.6 ], [ %call8, %if.else ], [ %call12, %if.then.11 ], [ %call14, %if.else.13 ], [ %call23, %if.then.22 ], [ %call25, %if.else.24 ], [ %call29, %if.then.28 ], [ %call31, %if.else.30 ], [ %call65, %if.then.64 ], [ %call61, %if.end.59 ], [ %call144, %for.cond.cleanup ]
%13 = bitcast i64* %bits to i8*
- call void @llvm.lifetime.end(i64 8, i8* %13) #0
+ call void @llvm.lifetime.end.p0i8(i64 8, i8* %13) #0
ret i8* %retval.1
}
diff --git a/test/CodeGen/PowerPC/expand-contiguous-isel.ll b/test/CodeGen/PowerPC/expand-contiguous-isel.ll
new file mode 100644
index 000000000000..5fe69ebcf58e
--- /dev/null
+++ b/test/CodeGen/PowerPC/expand-contiguous-isel.ll
@@ -0,0 +1,151 @@
+target datalayout = "e-m:e-i64:64-n32:64"
+target triple = "powerpc64le-unknown-linux-gnu"
+; This file mainly tests that one of the ISEL instruction in the group uses the same register for operand RT, RA, RB
+; RUN: llc -verify-machineinstrs -O2 -ppc-asm-full-reg-names -mcpu=pwr7 -ppc-gen-isel=true < %s | FileCheck %s --check-prefix=CHECK-GEN-ISEL-TRUE
+; RUN: llc -verify-machineinstrs -O2 -ppc-asm-full-reg-names -mcpu=pwr7 -ppc-gen-isel=false < %s | FileCheck %s --implicit-check-not isel
+; Function Attrs: norecurse nounwind readnone
+@.str = private unnamed_addr constant [3 x i8] c"]]\00", align 1
+@.str.1 = private unnamed_addr constant [35 x i8] c"Index < Length && \22Invalid index!\22\00", align 1
+@.str.2 = private unnamed_addr constant [50 x i8] c"/home/jtony/src/llvm/include/llvm/ADT/StringRef.h\00", align 1
+@__PRETTY_FUNCTION__._ZNK4llvm9StringRefixEm = private unnamed_addr constant [47 x i8] c"char llvm::StringRef::operator[](size_t) const\00", align 1
+@.str.3 = private unnamed_addr constant [95 x i8] c"(data || length == 0) && \22StringRef cannot be built from a NULL argument with non-null length\22\00", align 1
+@__PRETTY_FUNCTION__._ZN4llvm9StringRefC2EPKcm = private unnamed_addr constant [49 x i8] c"llvm::StringRef::StringRef(const char *, size_t)\00", align 1
+; Function Attrs: nounwind
+define i64 @_Z3fn1N4llvm9StringRefE([2 x i64] %Str.coerce) local_unnamed_addr #0 {
+entry:
+ %Str.coerce.fca.0.extract = extractvalue [2 x i64] %Str.coerce, 0
+ %Str.coerce.fca.1.extract = extractvalue [2 x i64] %Str.coerce, 1
+ br label %while.cond.outer
+while.cond.outer: ; preds = %_ZNK4llvm9StringRef6substrEmm.exit, %entry
+ %Str.sroa.0.0.ph = phi i64 [ %8, %_ZNK4llvm9StringRef6substrEmm.exit ], [ %Str.coerce.fca.0.extract, %entry ]
+ %.sink.ph = phi i64 [ %sub.i, %_ZNK4llvm9StringRef6substrEmm.exit ], [ %Str.coerce.fca.1.extract, %entry ]
+ %BracketDepth.0.ph = phi i64 [ %BracketDepth.1, %_ZNK4llvm9StringRef6substrEmm.exit ], [ undef, %entry ]
+ %cmp65 = icmp eq i64 %BracketDepth.0.ph, 0
+ br i1 %cmp65, label %while.cond.us.preheader, label %while.cond.preheader
+while.cond.us.preheader: ; preds = %while.cond.outer
+ br label %while.cond.us
+while.cond.preheader: ; preds = %while.cond.outer
+ %cmp.i34129 = icmp eq i64 %.sink.ph, 0
+ br i1 %cmp.i34129, label %cond.false.i.loopexit135, label %_ZNK4llvm9StringRefixEm.exit.preheader
+_ZNK4llvm9StringRefixEm.exit.preheader: ; preds = %while.cond.preheader
+ br label %_ZNK4llvm9StringRefixEm.exit
+while.cond.us: ; preds = %while.cond.us.preheader, %_ZNK4llvm9StringRef6substrEmm.exit50.us
+ %Str.sroa.0.0.us = phi i64 [ %3, %_ZNK4llvm9StringRef6substrEmm.exit50.us ], [ %Str.sroa.0.0.ph, %while.cond.us.preheader ]
+ %.sink.us = phi i64 [ %sub.i41.us, %_ZNK4llvm9StringRef6substrEmm.exit50.us ], [ %.sink.ph, %while.cond.us.preheader ]
+ %cmp.i30.us = icmp ult i64 %.sink.us, 2
+ br i1 %cmp.i30.us, label %if.end.us, label %if.end.i.i.us
+if.end.i.i.us: ; preds = %while.cond.us
+ %0 = inttoptr i64 %Str.sroa.0.0.us to i8*
+ %call.i.i.us = tail call signext i32 @memcmp(i8* %0, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @.str, i64 0, i64 0), i64 2) #3
+ %phitmp.i.us = icmp eq i32 %call.i.i.us, 0
+ br i1 %phitmp.i.us, label %if.then, label %_ZNK4llvm9StringRefixEm.exit.us
+if.end.us: ; preds = %while.cond.us
+ %cmp.i34.us = icmp eq i64 %.sink.us, 0
+ br i1 %cmp.i34.us, label %cond.false.i.loopexit, label %_ZNK4llvm9StringRefixEm.exit.us
+_ZNK4llvm9StringRefixEm.exit.us: ; preds = %if.end.i.i.us, %if.end.us
+ %1 = inttoptr i64 %Str.sroa.0.0.us to i8*
+ %2 = load i8, i8* %1, align 1, !tbaa !2
+ switch i8 %2, label %_ZNK4llvm9StringRef6substrEmm.exit.loopexit [
+ i8 92, label %if.then4.us
+ i8 93, label %if.then9
+ ]
+if.then4.us: ; preds = %_ZNK4llvm9StringRefixEm.exit.us
+ %.sroa.speculated12.i38.us = select i1 %cmp.i30.us, i64 %.sink.us, i64 2
+ %add.ptr.i40.us = getelementptr inbounds i8, i8* %1, i64 %.sroa.speculated12.i38.us
+ %sub.i41.us = sub i64 %.sink.us, %.sroa.speculated12.i38.us
+ %tobool.i.i44.us = icmp ne i8* %add.ptr.i40.us, null
+ %cmp.i4.i45.us = icmp eq i64 %sub.i41.us, 0
+ %or.cond.i.i46.us = or i1 %tobool.i.i44.us, %cmp.i4.i45.us
+ br i1 %or.cond.i.i46.us, label %_ZNK4llvm9StringRef6substrEmm.exit50.us, label %cond.false.i.i47.loopexit
+_ZNK4llvm9StringRef6substrEmm.exit50.us: ; preds = %if.then4.us
+ %3 = ptrtoint i8* %add.ptr.i40.us to i64
+ br label %while.cond.us
+if.then: ; preds = %if.end.i.i.us
+ ret i64 undef
+cond.false.i.loopexit: ; preds = %if.end.us
+ br label %cond.false.i
+cond.false.i.loopexit134: ; preds = %_ZNK4llvm9StringRef6substrEmm.exit50
+ br label %cond.false.i
+cond.false.i.loopexit135: ; preds = %while.cond.preheader
+ br label %cond.false.i
+cond.false.i: ; preds = %cond.false.i.loopexit135, %cond.false.i.loopexit134, %cond.false.i.loopexit
+ tail call void @__assert_fail(i8* getelementptr inbounds ([35 x i8], [35 x i8]* @.str.1, i64 0, i64 0), i8* getelementptr inbounds ([50 x i8], [50 x i8]* @.str.2, i64 0, i64 0), i32 zeroext 225, i8* getelementptr inbounds ([47 x i8], [47 x i8]* @__PRETTY_FUNCTION__._ZNK4llvm9StringRefixEm, i64 0, i64 0)) #4
+ unreachable
+_ZNK4llvm9StringRefixEm.exit: ; preds = %_ZNK4llvm9StringRefixEm.exit.preheader, %_ZNK4llvm9StringRef6substrEmm.exit50
+ %.sink131 = phi i64 [ %sub.i41, %_ZNK4llvm9StringRef6substrEmm.exit50 ], [ %.sink.ph, %_ZNK4llvm9StringRefixEm.exit.preheader ]
+ %Str.sroa.0.0130 = phi i64 [ %6, %_ZNK4llvm9StringRef6substrEmm.exit50 ], [ %Str.sroa.0.0.ph, %_ZNK4llvm9StringRefixEm.exit.preheader ]
+ %4 = inttoptr i64 %Str.sroa.0.0130 to i8*
+ %5 = load i8, i8* %4, align 1, !tbaa !2
+ switch i8 %5, label %_ZNK4llvm9StringRef6substrEmm.exit.loopexit132 [
+ i8 92, label %if.then4
+ i8 93, label %if.end10
+ ]
+if.then4: ; preds = %_ZNK4llvm9StringRefixEm.exit
+ %cmp.i.i37 = icmp ult i64 %.sink131, 2
+ %.sroa.speculated12.i38 = select i1 %cmp.i.i37, i64 %.sink131, i64 2
+ %add.ptr.i40 = getelementptr inbounds i8, i8* %4, i64 %.sroa.speculated12.i38
+ %sub.i41 = sub i64 %.sink131, %.sroa.speculated12.i38
+ %tobool.i.i44 = icmp ne i8* %add.ptr.i40, null
+ %cmp.i4.i45 = icmp eq i64 %sub.i41, 0
+ %or.cond.i.i46 = or i1 %tobool.i.i44, %cmp.i4.i45
+ br i1 %or.cond.i.i46, label %_ZNK4llvm9StringRef6substrEmm.exit50, label %cond.false.i.i47.loopexit133
+cond.false.i.i47.loopexit: ; preds = %if.then4.us
+ br label %cond.false.i.i47
+cond.false.i.i47.loopexit133: ; preds = %if.then4
+ br label %cond.false.i.i47
+cond.false.i.i47: ; preds = %cond.false.i.i47.loopexit133, %cond.false.i.i47.loopexit
+ tail call void @__assert_fail(i8* getelementptr inbounds ([95 x i8], [95 x i8]* @.str.3, i64 0, i64 0), i8* getelementptr inbounds ([50 x i8], [50 x i8]* @.str.2, i64 0, i64 0), i32 zeroext 90, i8* getelementptr inbounds ([49 x i8], [49 x i8]* @__PRETTY_FUNCTION__._ZN4llvm9StringRefC2EPKcm, i64 0, i64 0)) #4
+ unreachable
+_ZNK4llvm9StringRef6substrEmm.exit50: ; preds = %if.then4
+ %6 = ptrtoint i8* %add.ptr.i40 to i64
+ %cmp.i34 = icmp eq i64 %sub.i41, 0
+ br i1 %cmp.i34, label %cond.false.i.loopexit134, label %_ZNK4llvm9StringRefixEm.exit
+if.then9: ; preds = %_ZNK4llvm9StringRefixEm.exit.us
+ tail call void @exit(i32 signext 1) #4
+ unreachable
+if.end10: ; preds = %_ZNK4llvm9StringRefixEm.exit
+ %dec = add i64 %BracketDepth.0.ph, -1
+ br label %_ZNK4llvm9StringRef6substrEmm.exit
+_ZNK4llvm9StringRef6substrEmm.exit.loopexit: ; preds = %_ZNK4llvm9StringRefixEm.exit.us
+ br label %_ZNK4llvm9StringRef6substrEmm.exit
+_ZNK4llvm9StringRef6substrEmm.exit.loopexit132: ; preds = %_ZNK4llvm9StringRefixEm.exit
+ br label %_ZNK4llvm9StringRef6substrEmm.exit
+_ZNK4llvm9StringRef6substrEmm.exit: ; preds = %_ZNK4llvm9StringRef6substrEmm.exit.loopexit132, %_ZNK4llvm9StringRef6substrEmm.exit.loopexit, %if.end10
+ %.sink76 = phi i64 [ %.sink131, %if.end10 ], [ %.sink.us, %_ZNK4llvm9StringRef6substrEmm.exit.loopexit ], [ %.sink131, %_ZNK4llvm9StringRef6substrEmm.exit.loopexit132 ]
+ %7 = phi i8* [ %4, %if.end10 ], [ %1, %_ZNK4llvm9StringRef6substrEmm.exit.loopexit ], [ %4, %_ZNK4llvm9StringRef6substrEmm.exit.loopexit132 ]
+ %BracketDepth.1 = phi i64 [ %dec, %if.end10 ], [ 0, %_ZNK4llvm9StringRef6substrEmm.exit.loopexit ], [ %BracketDepth.0.ph, %_ZNK4llvm9StringRef6substrEmm.exit.loopexit132 ]
+ %sub.i = add i64 %.sink76, -1
+ %add.ptr.i = getelementptr inbounds i8, i8* %7, i64 1
+ %8 = ptrtoint i8* %add.ptr.i to i64
+ br label %while.cond.outer
+
+; CHECK-LABEL: @_Z3fn1N4llvm9StringRefE
+; CHECK-GEN-ISEL-TRUE: isel [[SAME:r[0-9]+]], [[SAME]], [[SAME]]
+; CHECK-GEN-ISEL-TRUE: isel [[SAME:r[0-9]+]], {{r[0-9]+}}, [[SAME]]
+; CHECK: bc 12, 2, [[TRUE:.LBB[0-9]+]]
+; CHECK-NEXT: b [[SUCCESSOR:.LBB[0-9]+]]
+; CHECK-NEXT: [[TRUE]]
+; CHECK-NEXT: addi {{r[0-9]+}}, {{r[0-9]+}}, 0
+; CHECK-NEXT: [[SUCCESSOR]]
+}
+
+
+
+; Function Attrs: noreturn nounwind
+declare void @exit(i32 signext) local_unnamed_addr #1
+; Function Attrs: nounwind readonly
+declare signext i32 @memcmp(i8* nocapture, i8* nocapture, i64) local_unnamed_addr #2
+; Function Attrs: noreturn nounwind
+declare void @__assert_fail(i8*, i8*, i32 zeroext, i8*) local_unnamed_addr #1
+attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="ppc64le" "target-features"="+altivec,+bpermd,+crypto,+direct-move,+extdiv,+power8-vector,+vsx,-power9-vector,-qpx" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { noreturn nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="ppc64le" "target-features"="+altivec,+bpermd,+crypto,+direct-move,+extdiv,+power8-vector,+vsx,-power9-vector,-qpx" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #2 = { nounwind readonly "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="ppc64le" "target-features"="+altivec,+bpermd,+crypto,+direct-move,+extdiv,+power8-vector,+vsx,-power9-vector,-qpx" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #3 = { nounwind readonly }
+attributes #4 = { noreturn nounwind }
+!llvm.module.flags = !{!0}
+!llvm.ident = !{!1}
+!0 = !{i32 1, !"PIC Level", i32 2}
+!1 = !{!"clang version 4.0.0 (trunk 286863) (llvm/trunk 286967)"}
+!2 = !{!3, !3, i64 0}
+!3 = !{!"omnipotent char", !4, i64 0}
+!4 = !{!"Simple C++ TBAA"}
diff --git a/test/CodeGen/PowerPC/expand-isel-1.mir b/test/CodeGen/PowerPC/expand-isel-1.mir
new file mode 100644
index 000000000000..e666ad47fca0
--- /dev/null
+++ b/test/CodeGen/PowerPC/expand-isel-1.mir
@@ -0,0 +1,57 @@
+# This file tests the scenario: ISEL R0, ZERO, R0, CR
+# RUN: llc -ppc-gen-isel=false -run-pass ppc-expand-isel -o - %s | FileCheck %s
+
+--- |
+ target datalayout = "E-m:e-i64:64-n32:64"
+ target triple = "powerpc64-unknown-linux-gnu"
+ define signext i32 @testExpandISEL(i32 signext %i, i32 signext %j) {
+ entry:
+ %cmp = icmp sgt i32 %i, 0
+ %add = add nsw i32 %i, 1
+ %cond = select i1 %cmp, i32 %add, i32 %j
+ ret i32 %cond
+ }
+
+...
+---
+name: testExpandISEL
+alignment: 2
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%x0' }
+ - { reg: '%x3' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0.entry:
+ liveins: %x0, %x3
+
+ %r5 = ADDI %r3, 1
+ %cr0 = CMPWI %r3, 0
+ %r0 = ISEL %zero, %r0, %cr0gt
+ ; CHECK-LABEL: testExpandISEL
+ ; CHECK: BC %cr0gt, %[[TRUE:bb.[0-9]+]]
+ ; CHECK-NEXT: B %[[SUCCESSOR:bb.[0-9]+]]
+ ; CHECK: [[TRUE]]
+ ; CHECK: %r0 = ADDI %zero, 0
+
+ %x3 = EXTSW_32_64 %r0
+
+...
+
diff --git a/test/CodeGen/PowerPC/expand-isel-2.mir b/test/CodeGen/PowerPC/expand-isel-2.mir
new file mode 100644
index 000000000000..8e9c3a25e60c
--- /dev/null
+++ b/test/CodeGen/PowerPC/expand-isel-2.mir
@@ -0,0 +1,57 @@
+# This file tests the scenario: ISEL RX, ZERO, RY, CR (X != 0 && Y != 0)
+# RUN: llc -ppc-gen-isel=false -run-pass ppc-expand-isel -o - %s | FileCheck %s
+
+--- |
+ target datalayout = "E-m:e-i64:64-n32:64"
+ target triple = "powerpc64-unknown-linux-gnu"
+ define signext i32 @testExpandISEL(i32 signext %i, i32 signext %j) {
+ entry:
+ %cmp = icmp sgt i32 %i, 0
+ %add = add nsw i32 %i, 1
+ %cond = select i1 %cmp, i32 %add, i32 %j
+ ret i32 %cond
+ }
+
+...
+---
+name: testExpandISEL
+alignment: 2
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%x0' }
+ - { reg: '%x3' }
+ - { reg: '%x4' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0.entry:
+ liveins: %x0, %x3, %x4
+
+ %r5 = ADDI %r3, 1
+ %cr0 = CMPWI %r3, 0
+ %r3 = ISEL %zero, %r4, %cr0gt
+ ; CHECK: BC %cr0gt, %[[TRUE:bb.[0-9]+]]
+ ; CHECK: %[[FALSE:bb.[0-9]+]]
+ ; CHECK: %r3 = ORI %r4, 0
+ ; CHECK: B %[[SUCCESSOR:bb.[0-9]+]]
+ ; CHECK: [[TRUE]]
+ ; CHECK: %r3 = ADDI %zero, 0
+
+ %x3 = EXTSW_32_64 %r3
+...
diff --git a/test/CodeGen/PowerPC/expand-isel-3.mir b/test/CodeGen/PowerPC/expand-isel-3.mir
new file mode 100644
index 000000000000..c8987266f476
--- /dev/null
+++ b/test/CodeGen/PowerPC/expand-isel-3.mir
@@ -0,0 +1,58 @@
+# This file tests the scenario: ISEL RX, RY, R0, CR (X != 0 && Y != 0)
+# RUN: llc -ppc-gen-isel=false -run-pass ppc-expand-isel -o - %s | FileCheck %s
+
+--- |
+ target datalayout = "E-m:e-i64:64-n32:64"
+ target triple = "powerpc64-unknown-linux-gnu"
+ define signext i32 @testExpandISEL(i32 signext %i, i32 signext %j) {
+ entry:
+ %cmp = icmp sgt i32 %i, 0
+ %add = add nsw i32 %i, 1
+ %cond = select i1 %cmp, i32 %add, i32 %j
+ ret i32 %cond
+ }
+
+...
+---
+name: testExpandISEL
+alignment: 2
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%x0' }
+ - { reg: '%x3' }
+ - { reg: '%x4' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0.entry:
+ liveins: %x0, %x3, %x4
+
+ %r5 = ADDI %r3, 1
+ %cr0 = CMPWI %r3, 0
+ %r3 = ISEL %r4, %r0, %cr0gt
+ ; CHECK: BC %cr0gt, %[[TRUE:bb.[0-9]+]]
+ ; CHECK: %[[FALSE:bb.[0-9]+]]
+ ; CHECK: %r3 = ORI %r0, 0
+ ; CHECK: B %[[SUCCESSOR:bb.[0-9]+]]
+ ; CHECK: [[TRUE]]
+ ; CHECK: %r3 = ADDI %r4, 0
+
+ %x3 = EXTSW_32_64 %r3
+
+...
diff --git a/test/CodeGen/PowerPC/expand-isel-4.mir b/test/CodeGen/PowerPC/expand-isel-4.mir
new file mode 100644
index 000000000000..83624f7c1e34
--- /dev/null
+++ b/test/CodeGen/PowerPC/expand-isel-4.mir
@@ -0,0 +1,59 @@
+# This file tests the scenario: ISEL R0, ZERO, RX, CR (X != 0)
+# It also tests redundant liveins (%x7) and killed registers.
+# RUN: llc -ppc-gen-isel=false -run-pass ppc-expand-isel -o - %s | FileCheck %s
+
+--- |
+ target datalayout = "E-m:e-i64:64-n32:64"
+ target triple = "powerpc64-unknown-linux-gnu"
+ define signext i32 @testExpandISEL(i32 signext %i, i32 signext %j) {
+ entry:
+ %cmp = icmp sgt i32 %i, 0
+ %add = add nsw i32 %i, 1
+ %cond = select i1 %cmp, i32 %add, i32 %j
+ ret i32 %cond
+ }
+
+...
+---
+name: testExpandISEL
+alignment: 2
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%x0' }
+ - { reg: '%x3' }
+ - { reg: '%x7' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0.entry:
+ liveins: %x0, %x3, %x7
+
+ %r5 = ADDI %r3, 1
+ %cr0 = CMPWI %r3, 0
+ %r0 = ISEL killed %zero, killed %r5, killed %cr0gt, implicit killed %cr0
+ ; CHECK: BC killed %cr0gt, %[[TRUE:bb.[0-9]+]]
+ ; CHECK: %[[FALSE:bb.[0-9]+]]
+ ; CHECK: %r0 = ORI killed %r5, 0
+ ; CHECK: B %[[SUCCESSOR:bb.[0-9]+]]
+ ; CHECK: [[TRUE]]
+ ; CHECK: %r0 = ADDI killed %zero, 0
+
+ %x0 = EXTSW_32_64 killed %r0
+
+...
diff --git a/test/CodeGen/PowerPC/expand-isel-5.mir b/test/CodeGen/PowerPC/expand-isel-5.mir
new file mode 100644
index 000000000000..7a7130f80cf8
--- /dev/null
+++ b/test/CodeGen/PowerPC/expand-isel-5.mir
@@ -0,0 +1,54 @@
+# This file tests the scenario: ISEL R0, RX, R0, CR (X != 0)
+# RUN: llc -ppc-gen-isel=false -run-pass ppc-expand-isel -o - %s | FileCheck %s
+
+--- |
+ target datalayout = "E-m:e-i64:64-n32:64"
+ target triple = "powerpc64-unknown-linux-gnu"
+ define signext i32 @testExpandISEL(i32 signext %i, i32 signext %j) {
+ entry:
+ %cmp = icmp sgt i32 %i, 0
+ %add = add nsw i32 %i, 1
+ %cond = select i1 %cmp, i32 %add, i32 %j
+ ret i32 %cond
+ }
+
+...
+---
+name: testExpandISEL
+alignment: 2
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%x0' }
+ - { reg: '%x3' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0.entry:
+ liveins: %x0, %x3
+
+ %r5 = ADDI %r3, 1
+ %cr0 = CMPWI %r3, 0
+ %r0 = ISEL %r5, %r0, %cr0gt
+ ; CHECK: BC %cr0gt, %[[TRUE:bb.[0-9]+]]
+ ; CHECK: B %[[SUCCESSOR:bb.[0-9]+]]
+ ; CHECK: [[TRUE]]
+ ; CHECK: %r0 = ADDI %r5, 0
+ %x3 = EXTSW_32_64 %r0
+
+...
diff --git a/test/CodeGen/PowerPC/expand-isel-6.mir b/test/CodeGen/PowerPC/expand-isel-6.mir
new file mode 100644
index 000000000000..5aed399e677a
--- /dev/null
+++ b/test/CodeGen/PowerPC/expand-isel-6.mir
@@ -0,0 +1,57 @@
+# This file tests the scenario when ISEL is the last instruction of the last
+# Basic Block, i.e., the BB cannot fall through to its successor situation.
+# RUN: llc -ppc-gen-isel=false -run-pass ppc-expand-isel -o - %s | FileCheck %s
+
+--- |
+ target datalayout = "E-m:e-i64:64-n32:64"
+ target triple = "powerpc64-unknown-linux-gnu"
+ define signext i32 @testExpandISEL(i32 signext %i, i32 signext %j) {
+ entry:
+ %cmp = icmp sgt i32 %i, 0
+ %add = add nsw i32 %i, 1
+ %cond = select i1 %cmp, i32 %add, i32 %j
+ ret i32 %cond
+ }
+
+...
+---
+name: testExpandISEL
+alignment: 2
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%x0' }
+ - { reg: '%x3' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0.entry:
+ liveins: %x0, %x3
+
+ %r5 = ADDI %r3, 1
+ %cr0 = CMPWI %r3, 0
+ %r3 = ISEL %zero, %r0, %cr0gt
+ ; CHECK: BC %cr0gt, %[[TRUE:bb.[0-9]+]]
+ ; CHECK: %[[FALSE:bb.[0-9]+]]
+ ; CHECK: %r3 = ORI %r0, 0
+ ; CHECK: B %[[SUCCESSOR:bb.[0-9]+]]
+ ; CHECK: [[TRUE]]
+ ; CHECK: %r3 = ADDI %zero, 0
+
+
+...
diff --git a/test/CodeGen/PowerPC/expand-isel-7.mir b/test/CodeGen/PowerPC/expand-isel-7.mir
new file mode 100644
index 000000000000..4043a45a2e70
--- /dev/null
+++ b/test/CodeGen/PowerPC/expand-isel-7.mir
@@ -0,0 +1,58 @@
+# This file tests the scenario: ISEL RX, RY, RZ, CR (X != 0 && Y != 0, Z != 0)
+# RUN: llc -ppc-gen-isel=false -run-pass ppc-expand-isel -o - %s | FileCheck %s
+
+--- |
+ target datalayout = "E-m:e-i64:64-n32:64"
+ target triple = "powerpc64-unknown-linux-gnu"
+ define signext i32 @testExpandISEL(i32 signext %i, i32 signext %j) {
+ entry:
+ %cmp = icmp sgt i32 %i, 0
+ %add = add nsw i32 %i, 1
+ %cond = select i1 %cmp, i32 %add, i32 %j
+ ret i32 %cond
+ }
+
+...
+---
+name: testExpandISEL
+alignment: 2
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%x3' }
+ - { reg: '%x4' }
+ - { reg: '%x5' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0.entry:
+ liveins: %x3, %x4, %x5
+
+ %r4 = ADDI %r3, 1
+ %cr0 = CMPWI %r3, 0
+ %r5 = ISEL %r3, %r4, %cr0gt
+ ; CHECK: BC %cr0gt, %[[TRUE:bb.[0-9]+]]
+ ; CHECK: %[[FALSE:bb.[0-9]+]]
+ ; CHECK: %r5 = ORI %r4, 0
+ ; CHECK: B %[[SUCCESSOR:bb.[0-9]+]]
+ ; CHECK: [[TRUE]]
+ ; CHECK: %r5 = ADDI %r3, 0
+
+ %x5 = EXTSW_32_64 %r5
+
+...
diff --git a/test/CodeGen/PowerPC/expand-isel-8.mir b/test/CodeGen/PowerPC/expand-isel-8.mir
new file mode 100644
index 000000000000..c8b857e69791
--- /dev/null
+++ b/test/CodeGen/PowerPC/expand-isel-8.mir
@@ -0,0 +1,65 @@
+# This file tests combining three consecutive ISELs scenario.
+# RUN: llc -ppc-gen-isel=false -run-pass ppc-expand-isel -o - %s | FileCheck %s
+
+--- |
+ target datalayout = "E-m:e-i64:64-n32:64"
+ target triple = "powerpc64-unknown-linux-gnu"
+ define signext i32 @testExpandISEL(i32 signext %i, i32 signext %j) {
+ entry:
+ %cmp = icmp sgt i32 %i, 0
+ %add = add nsw i32 %i, 1
+ %cond = select i1 %cmp, i32 %add, i32 %j
+ ret i32 %cond
+ }
+
+...
+---
+name: testExpandISEL
+alignment: 2
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%x3' }
+ - { reg: '%x4' }
+ - { reg: '%x5' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+body: |
+ bb.0.entry:
+ liveins: %x3, %x4, %x5
+
+ %r4 = ADDI %r3, 1
+ %cr0 = CMPWI %r3, 0
+ %r5 = ISEL %r3, %r4, %cr0gt
+ %r3 = ISEL %r4, %r5, %cr0gt
+ %r4 = ISEL %r3, %r5, %cr0gt
+ ; CHECK: BC %cr0gt, %[[TRUE:bb.[0-9]+]]
+ ; CHECK: %[[FALSE:bb.[0-9]+]]
+ ; CHECK: %r5 = ORI %r4, 0
+ ; CHECK: %r3 = ORI %r5, 0
+ ; CHECK: %r4 = ORI %r5, 0
+ ; CHECK: B %[[SUCCESSOR:bb.[0-9]+]]
+ ; CHECK: [[TRUE]]
+ ; CHECK: %r5 = ADDI %r3, 0
+ ; CHECK: %r3 = ADDI %r4, 0
+ ; CHECK: %r4 = ADDI %r3, 0
+
+ %x5 = EXTSW_32_64 %r5
+ %x3 = EXTSW_32_64 %r3
+
+...
diff --git a/test/CodeGen/PowerPC/expand-isel.ll b/test/CodeGen/PowerPC/expand-isel.ll
new file mode 100644
index 000000000000..553cc3c372e5
--- /dev/null
+++ b/test/CodeGen/PowerPC/expand-isel.ll
@@ -0,0 +1,227 @@
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64"
+target triple = "powerpc64-unknown-linux-gnu"
+; RUN: llc -verify-machineinstrs -O2 -ppc-asm-full-reg-names -mcpu=pwr7 -ppc-gen-isel=false < %s | FileCheck %s --implicit-check-not isel
+; Function Attrs: norecurse nounwind readnone
+define signext i32 @testExpandISELToIfElse(i32 signext %i, i32 signext %j) {
+entry:
+ %cmp = icmp sgt i32 %i, 0
+ %add = add nsw i32 %i, 1
+ %cond = select i1 %cmp, i32 %add, i32 %j
+ ret i32 %cond
+
+; CHECK-LABEL: @testExpandISELToIfElse
+; CHECK: addi r5, r3, 1
+; CHECK-NEXT: cmpwi cr0, r3, 0
+; CHECK-NEXT: bc 12, 1, [[TRUE:.LBB[0-9]+]]
+; CHECK: ori r3, r4, 0
+; CHECK-NEXT: b [[SUCCESSOR:.LBB[0-9]+]]
+; CHECK-NEXT: [[TRUE]]
+; CHECK-NEXT: addi r3, r5, 0
+; CHECK-NEXT: [[SUCCESSOR]]
+; CHECK-NEXT: extsw r3, r3
+; CHECK-NEXT: blr
+}
+
+
+; Function Attrs: norecurse nounwind readnone
+define signext i32 @testExpandISELToIf(i32 signext %i, i32 signext %j) {
+entry:
+ %cmp = icmp sgt i32 %i, 0
+ %cond = select i1 %cmp, i32 %j, i32 %i
+ ret i32 %cond
+
+; CHECK-LABEL: @testExpandISELToIf
+; CHECK: cmpwi r3, 0
+; CHECK-NEXT: bc 12, 1, [[TRUE:.LBB[0-9]+]]
+; CHECK-NEXT: blr
+; CHECK-NEXT: [[TRUE]]
+; CHECK-NEXT: addi r3, r4, 0
+; CHECK-NEXT: blr
+}
+
+; Function Attrs: norecurse nounwind readnone
+define signext i32 @testExpandISELToElse(i32 signext %i, i32 signext %j) {
+entry:
+ %cmp = icmp sgt i32 %i, 0
+ %cond = select i1 %cmp, i32 %i, i32 %j
+ ret i32 %cond
+
+; CHECK-LABEL: @testExpandISELToElse
+; CHECK: cmpwi r3, 0
+; CHECK-NEXT: bclr 12, 1, 0
+; CHECK: ori r3, r4, 0
+; CHECK-NEXT: blr
+}
+
+; Function Attrs: norecurse nounwind readnone
+define signext i32 @testReplaceISELWithCopy(i32 signext %i, i32 signext %j) {
+entry:
+ %cmp = icmp sgt i32 %i, 0
+ %cond = select i1 %cmp, i32 %j, i32 %j
+ ret i32 %cond
+
+; CHECK-LABEL: @testReplaceISELWithCopy
+
+; Fix me should really check: addi r3, r4, 0
+; but for some reason it's optimized to mr r3, r4
+; CHECK: mr r3, r4
+; CHECK-NEXT: blr
+}
+
+; Function Attrs: norecurse nounwind readnone
+define signext i32 @testExpandISELToNull(i32 signext %i, i32 signext %j) {
+entry:
+ %cmp = icmp sgt i32 %i, 0
+ %cond = select i1 %cmp, i32 %i, i32 %i
+ ret i32 %cond
+
+; CHECK-LABEL: @testExpandISELToNull
+; CHECK-NOT: b {{.LBB[0-9]+}}
+; CHECK-NOT: bc
+; CHECK: blr
+}
+
+; Function Attrs: norecurse nounwind readnone
+define signext i32 @testExpandISELsTo2ORIs2ADDIs
+ (i32 signext %a, i32 signext %b, i32 signext %d,
+ i32 signext %f, i32 signext %g) {
+entry:
+
+ %cmp = icmp sgt i32 %g, 0
+ %a.b = select i1 %cmp, i32 %g, i32 %b
+ %d.f = select i1 %cmp, i32 %d, i32 %f
+ %add = add nsw i32 %a.b, %d.f
+ ret i32 %add
+
+; CHECK-LABEL: @testExpandISELsTo2ORIs2ADDIs
+; CHECK: cmpwi r7, 0
+; CHECK-NEXT: bc 12, 1, [[TRUE:.LBB[0-9]+]]
+; CHECK: ori r3, r4, 0
+; CHECK-NEXT: ori r12, r6, 0
+; CHECK-NEXT: b [[SUCCESSOR:.LBB[0-9]+]]
+; CHECK-NEXT: [[TRUE]]
+; CHECK-NEXT: addi r3, r7, 0
+; CHECK-NEXT: addi r12, r5, 0
+; CHECK-NEXT: [[SUCCESSOR]]
+; CHECK-NEXT: add r3, r3, r12
+; CHECK-NEXT: extsw r3, r3
+; CHECK-NEXT: blr
+}
+
+; Function Attrs: norecurse nounwind readnone
+define signext i32 @testExpandISELsTo2ORIs1ADDI
+ (i32 signext %a, i32 signext %b, i32 signext %d,
+ i32 signext %f, i32 signext %g) {
+entry:
+ %cmp = icmp sgt i32 %g, 0
+ %a.b = select i1 %cmp, i32 %a, i32 %b
+ %d.f = select i1 %cmp, i32 %d, i32 %f
+ %add = add nsw i32 %a.b, %d.f
+ ret i32 %add
+
+; CHECK-LABEL: @testExpandISELsTo2ORIs1ADDI
+; CHECK: cmpwi cr0, r7, 0
+; CHECK-NEXT: bc 12, 1, [[TRUE:.LBB[0-9]+]]
+; CHECK: ori r3, r4, 0
+; CHECK-NEXT: ori r12, r6, 0
+; CHECK-NEXT: b [[SUCCESSOR:.LBB[0-9]+]]
+; CHECK-NEXT: [[TRUE]]
+; CHECK-NEXT: addi r12, r5, 0
+; CHECK-NEXT: [[SUCCESSOR]]
+; CHECK-NEXT: add r3, r3, r12
+; CHECK-NEXT: extsw r3, r3
+; CHECK-NEXT: blr
+}
+
+; Function Attrs: norecurse nounwind readnone
+define signext i32 @testExpandISELsTo1ORI1ADDI
+ (i32 signext %a, i32 signext %b, i32 signext %d,
+ i32 signext %f, i32 signext %g) {
+entry:
+
+ %cmp = icmp sgt i32 %g, 0
+ %a.b = select i1 %cmp, i32 %a, i32 %b
+ %d.f = select i1 %cmp, i32 %d, i32 %f
+ %add1 = add nsw i32 %a.b, %d.f
+ %add2 = add nsw i32 %a, %add1
+ ret i32 %add2
+
+; CHECK-LABEL: @testExpandISELsTo1ORI1ADDI
+; CHECK: cmpwi cr0, r7, 0
+; CHECK-NEXT: bc 12, 1, [[TRUE:.LBB[0-9]+]]
+; CHECK: ori r5, r6, 0
+; CHECK-NEXT: b [[SUCCESSOR:.LBB[0-9]+]]
+; CHECK-NEXT: [[TRUE]]
+; CHECK-NEXT: addi r4, r3, 0
+; CHECK-NEXT: [[SUCCESSOR]]
+; CHECK-NEXT: add r4, r4, r5
+; CHECK-NEXT: add r3, r3, r4
+; CHECK-NEXT: extsw r3, r3
+; CHECK-NEXT: blr
+}
+
+; Function Attrs: norecurse nounwind readnone
+define signext i32 @testExpandISELsTo0ORI2ADDIs
+ (i32 signext %a, i32 signext %b, i32 signext %d,
+ i32 signext %f, i32 signext %g) {
+entry:
+
+ %cmp = icmp sgt i32 %g, 0
+ %a.b = select i1 %cmp, i32 %a, i32 %b
+ %d.f = select i1 %cmp, i32 %d, i32 %f
+ %add1 = add nsw i32 %a.b, %d.f
+ %add2 = add nsw i32 %a, %add1
+ %sub1 = sub nsw i32 %add2, %d
+ ret i32 %sub1
+
+; CHECK-LABEL: @testExpandISELsTo0ORI2ADDIs
+; CHECK: cmpwi cr0, r7, 0
+; CHECK-NEXT: bc 12, 1, [[TRUE:.LBB[0-9]+]]
+; CHECK-NEXT: b [[SUCCESSOR:.LBB[0-9]+]]
+; CHECK-NEXT: [[TRUE]]
+; CHECK-NEXT: addi r4, r3, 0
+; CHECK-NEXT: addi r6, r5, 0
+; CHECK-NEXT: [[SUCCESSOR]]
+; CHECK-NEXT: add r4, r4, r6
+; CHECK-NEXT: add r3, r3, r4
+; CHECK-NEXT: subf r3, r5, r3
+; CHECK-NEXT: extsw r3, r3
+; CHECK-NEXT: blr
+}
+
+
+@b = common local_unnamed_addr global i32 0, align 4
+@a = common local_unnamed_addr global i32 0, align 4
+; Function Attrs: norecurse nounwind readonly
+define signext i32 @testComplexISEL() #0 {
+entry:
+ %0 = load i32, i32* @b, align 4, !tbaa !1
+ %tobool = icmp eq i32 %0, 0
+ br i1 %tobool, label %if.end, label %cleanup
+
+if.end:
+ %1 = load i32, i32* @a, align 4, !tbaa !1
+ %conv = sext i32 %1 to i64
+ %2 = inttoptr i64 %conv to i32 (...)*
+ %cmp = icmp eq i32 (...)* %2, bitcast (i32 ()* @testComplexISEL to i32 (...)*)
+ %conv3 = zext i1 %cmp to i32
+ br label %cleanup
+
+cleanup:
+ %retval.0 = phi i32 [ %conv3, %if.end ], [ 1, %entry ]
+ ret i32 %retval.0
+
+; CHECK-LABEL: @testComplexISEL
+; CHECK: bc 12, 2, [[TRUE:.LBB[0-9]+]]
+; CHECK-NEXT: b [[SUCCESSOR:.LBB[0-9]+]]
+; CHECK-NEXT: [[TRUE]]
+; CHECK-NEXT: addi r3, r12, 0
+; CHECK-NEXT: [[SUCCESSOR]]
+; CHECK-NEXT: clrldi r3, r3, 32
+; CHECK-NEXT: blr
+}
+
+!1 = !{!2, !2, i64 0}
+!2 = !{!"int", !3, i64 0}
+!3 = !{!"omnipotent char", !4, i64 0}
+!4 = !{!"Simple C/C++ TBAA"}
diff --git a/test/CodeGen/PowerPC/fast-isel-load-store.ll b/test/CodeGen/PowerPC/fast-isel-load-store.ll
index 1990f6b51d55..5317829c6ce9 100644
--- a/test/CodeGen/PowerPC/fast-isel-load-store.ll
+++ b/test/CodeGen/PowerPC/fast-isel-load-store.ll
@@ -196,7 +196,7 @@ define void @t17(i64 %v) nounwind {
%1 = add nsw i64 %v, 1
store i64 %1, i64* getelementptr inbounds ([8192 x i64], [8192 x i64]* @i, i32 0, i64 5000), align 8
; ELF64: addis
-; ELF64: ld
+; ELF64: addi
; ELF64: addi
; ELF64: lis
; ELF64: ori
diff --git a/test/CodeGen/PowerPC/fma-aggr-FMF.ll b/test/CodeGen/PowerPC/fma-aggr-FMF.ll
new file mode 100644
index 000000000000..8e97115bd1f2
--- /dev/null
+++ b/test/CodeGen/PowerPC/fma-aggr-FMF.ll
@@ -0,0 +1,35 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -verify-machineinstrs -mtriple=powerpc64le-linux-gnu | FileCheck %s
+
+define float @can_fma_with_fewer_uses(float %f1, float %f2, float %f3, float %f4) {
+; CHECK-LABEL: can_fma_with_fewer_uses:
+; CHECK: # BB#0:
+; CHECK-NEXT: xsmulsp 0, 1, 2
+; CHECK-NEXT: fmr 1, 0
+; CHECK-NEXT: xsmaddasp 1, 3, 4
+; CHECK-NEXT: xsdivsp 1, 0, 1
+; CHECK-NEXT: blr
+ %mul1 = fmul contract float %f1, %f2
+ %mul2 = fmul contract float %f3, %f4
+ %add = fadd contract float %mul1, %mul2
+ %second_use_of_mul1 = fdiv float %mul1, %add
+ ret float %second_use_of_mul1
+}
+
+; There is no contract on the mul with no extra use so we can't fuse that.
+; Since we are fusing with the mul with an extra use, the fmul needs to stick
+; around beside the fma.
+define float @no_fma_with_fewer_uses(float %f1, float %f2, float %f3, float %f4) {
+; CHECK-LABEL: no_fma_with_fewer_uses:
+; CHECK: # BB#0:
+; CHECK-NEXT: xsmulsp 0, 3, 4
+; CHECK-NEXT: xsmulsp 13, 1, 2
+; CHECK-NEXT: xsmaddasp 0, 1, 2
+; CHECK-NEXT: xsdivsp 1, 13, 0
+; CHECK-NEXT: blr
+ %mul1 = fmul contract float %f1, %f2
+ %mul2 = fmul float %f3, %f4
+ %add = fadd contract float %mul1, %mul2
+ %second_use_of_mul1 = fdiv float %mul1, %add
+ ret float %second_use_of_mul1
+}
diff --git a/test/CodeGen/PowerPC/fold-zero.ll b/test/CodeGen/PowerPC/fold-zero.ll
index 5e620ece0a99..180d8e1b9f55 100644
--- a/test/CodeGen/PowerPC/fold-zero.ll
+++ b/test/CodeGen/PowerPC/fold-zero.ll
@@ -1,5 +1,6 @@
; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -mattr=-crbits | FileCheck %s
-; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck -check-prefix=CHECK-CRB %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck --check-prefix=CHECK-CRB %s
+; RUN: llc -verify-machineinstrs -ppc-gen-isel=false < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 | FileCheck --check-prefix=CHECK-NO-ISEL %s
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
@@ -7,19 +8,33 @@ define i32 @test1(i1 %a, i32 %c) nounwind {
%x = select i1 %a, i32 %c, i32 0
ret i32 %x
-; CHECK: @test1
+; CHECK-LABEL: @test1
; CHECK-NOT: li {{[0-9]+}}, 0
; CHECK: isel 3, 0,
; CHECK: blr
+; CHECK-NO-ISEL-LABEL: @test1
+; CHECK-NO-ISEL: li 3, 0
+; CHECK-NO-ISEL-NEXT: bc 12, 1, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL-NEXT: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 4, 0
+; CHECK-NO-ISEL-NEXT: blr
}
define i32 @test2(i1 %a, i32 %c) nounwind {
%x = select i1 %a, i32 0, i32 %c
ret i32 %x
-; CHECK-CRB: @test2
+; CHECK-CRB-LABEL: @test2
; CHECK-CRB-NOT: li {{[0-9]+}}, 0
; CHECK-CRB: isel 3, 0,
; CHECK-CRB: blr
+; CHECK-NO-ISEL-LABEL: @test2
+; CHECK-NO-ISEL: bc 12, 1, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 4, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL-NEXT: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 0, 0
+; CHECK-NO-ISEL-NEXT: blr
}
diff --git a/test/CodeGen/PowerPC/fp-int-conversions-direct-moves.ll b/test/CodeGen/PowerPC/fp-int-conversions-direct-moves.ll
index 9b8fd4095793..955b1f27ca26 100644
--- a/test/CodeGen/PowerPC/fp-int-conversions-direct-moves.ll
+++ b/test/CodeGen/PowerPC/fp-int-conversions-direct-moves.ll
@@ -323,7 +323,7 @@ entry:
ret i64 %conv
; CHECK-LABEL: @_Z7testllff
; CHECK: xscvdpsxds [[CONVREG13:[0-9]+]], 1
-; CHECK: mfvsrd 3, [[CONVREG13]]
+; CHECK: mffprd 3, [[CONVREG13]]
}
; Function Attrs: nounwind
@@ -349,7 +349,7 @@ entry:
ret i64 %conv
; CHECK-LABEL: @_Z7testlldd
; CHECK: xscvdpsxds [[CONVREG14:[0-9]+]], 1
-; CHECK: mfvsrd 3, [[CONVREG14]]
+; CHECK: mffprd 3, [[CONVREG14]]
}
; Function Attrs: nounwind
@@ -375,7 +375,7 @@ entry:
ret i64 %conv
; CHECK-LABEL: @_Z8testullff
; CHECK: xscvdpuxds [[CONVREG15:[0-9]+]], 1
-; CHECK: mfvsrd 3, [[CONVREG15]]
+; CHECK: mffprd 3, [[CONVREG15]]
}
; Function Attrs: nounwind
@@ -401,7 +401,7 @@ entry:
ret i64 %conv
; CHECK-LABEL: @_Z8testulldd
; CHECK: xscvdpuxds [[CONVREG16:[0-9]+]], 1
-; CHECK: mfvsrd 3, [[CONVREG16]]
+; CHECK: mffprd 3, [[CONVREG16]]
}
; Function Attrs: nounwind
diff --git a/test/CodeGen/PowerPC/fp128-bitcast-after-operation.ll b/test/CodeGen/PowerPC/fp128-bitcast-after-operation.ll
index 2e537cd8a560..cd4eac42f26c 100644
--- a/test/CodeGen/PowerPC/fp128-bitcast-after-operation.ll
+++ b/test/CodeGen/PowerPC/fp128-bitcast-after-operation.ll
@@ -11,21 +11,17 @@ entry:
; PPC64-DAG: stxsdx 1, 0, [[ADDR_LO:[0-9]+]]
; PPC64-DAG: addi [[ADDR_HI]], [[SP:[0-9]+]], [[OFFSET_HI:-?[0-9]+]]
; PPC64-DAG: addi [[ADDR_LO]], [[SP]], [[OFFSET_LO:-?[0-9]+]]
-; PPC64-DAG: li [[MASK_REG:[0-9]+]], 1
-; PPC64: sldi [[MASK_REG]], [[MASK_REG]], 63
; PPC64-DAG: ld [[HI:[0-9]+]], [[OFFSET_LO]]([[SP]])
; PPC64-DAG: ld [[LO:[0-9]+]], [[OFFSET_HI]]([[SP]])
-; PPC64: and [[FLIP_BIT:[0-9]+]], [[HI]], [[MASK_REG]]
+; PPC64-DAG: rldicr [[FLIP_BIT:[0-9]+]], [[HI]], 0, 0
; PPC64-DAG: xor 3, [[HI]], [[FLIP_BIT]]
; PPC64-DAG: xor 4, [[LO]], [[FLIP_BIT]]
; PPC64: blr
; PPC64-P8-LABEL: test_abs:
-; PPC64-P8-DAG: mfvsrd [[LO:[0-9]+]], 2
-; PPC64-P8-DAG: mfvsrd [[HI:[0-9]+]], 1
-; PPC64-P8-DAG: li [[MASK_REG:[0-9]+]], 1
-; PPC64-P8-DAG: sldi [[SHIFT_REG:[0-9]+]], [[MASK_REG]], 63
-; PPC64-P8: and [[FLIP_BIT:[0-9]+]], [[HI]], [[SHIFT_REG]]
+; PPC64-P8-DAG: mffprd [[LO:[0-9]+]], 2
+; PPC64-P8-DAG: mffprd [[HI:[0-9]+]], 1
+; PPC64-P8-DAG: rldicr [[FLIP_BIT:[0-9]+]], [[HI]], 0, 0
; PPC64-P8-DAG: xor 3, [[HI]], [[FLIP_BIT]]
; PPC64-P8-DAG: xor 4, [[LO]], [[FLIP_BIT]]
; PPC64-P8: blr
@@ -63,10 +59,10 @@ entry:
; PPC64: blr
; PPC64-P8-LABEL: test_neg:
-; PPC64-P8-DAG: mfvsrd [[LO:[0-9]+]], 2
-; PPC64-P8-DAG: mfvsrd [[HI:[0-9]+]], 1
+; PPC64-P8-DAG: mffprd [[LO:[0-9]+]], 2
+; PPC64-P8-DAG: mffprd [[HI:[0-9]+]], 1
; PPC64-P8-DAG: li [[IMM1:[0-9]+]], 1
-; PPC64-P8-DAG: sldi [[FLIP_BIT]], [[IMM1]], 63
+; PPC64-P8-DAG: sldi [[FLIP_BIT:[0-9]+]], [[IMM1]], 63
; PPC64-P8-NOT: BARRIER
; PPC64-P8-DAG: xor 3, [[HI]], [[FLIP_BIT]]
; PPC64-P8-DAG: xor 4, [[LO]], [[FLIP_BIT]]
@@ -93,29 +89,25 @@ entry:
; PPC64-LABEL: test_copysign:
; PPC64-DAG: stxsdx 1, 0, [[ADDR_REG:[0-9]+]]
; PPC64-DAG: addi [[ADDR_REG]], 1, [[OFFSET:-?[0-9]+]]
-; PPC64-DAG: li [[SIGN:[0-9]+]], 1
-; PPC64-DAG: sldi [[SIGN]], [[SIGN]], 63
; PPC64-DAG: li [[HI_TMP:[0-9]+]], 16399
; PPC64-DAG: sldi [[CST_HI:[0-9]+]], [[HI_TMP]], 48
; PPC64-DAG: li [[LO_TMP:[0-9]+]], 3019
; PPC64-DAG: sldi [[CST_LO:[0-9]+]], [[LO_TMP]], 52
; PPC64-NOT: BARRIER
; PPC64-DAG: ld [[X_HI:[0-9]+]], [[OFFSET]](1)
-; PPC64-DAG: and [[NEW_HI_TMP:[0-9]+]], [[X_HI]], [[SIGN]]
+; PPC64-DAG: rldicr [[NEW_HI_TMP:[0-9]+]], [[X_HI]], 0, 0
; PPC64-DAG: or 3, [[NEW_HI_TMP]], [[CST_HI]]
-; PPC64-DAG: xor 4, [[SIGN]], [[CST_LO]]
+; PPC64-DAG: xor 4, [[NEW_HI_TMP]], [[CST_LO]]
; PPC64: blr
; PPC64-P8-LABEL: test_copysign:
-; PPC64-P8-DAG: mfvsrd [[X_HI:[0-9]+]], 1
-; PPC64-P8-DAG: li [[SIGN:[0-9]+]], 1
-; PPC64-P8-DAG: sldi [[SIGN]], [[SIGN]], 63
+; PPC64-P8-DAG: mffprd [[X_HI:[0-9]+]], 1
; PPC64-P8-DAG: li [[HI_TMP:[0-9]+]], 16399
; PPC64-P8-DAG: sldi [[CST_HI:[0-9]+]], [[HI_TMP]], 48
; PPC64-P8-DAG: li [[LO_TMP:[0-9]+]], 3019
; PPC64-P8-DAG: sldi [[CST_LO:[0-9]+]], [[LO_TMP]], 52
; PPC64-P8-NOT: BARRIER
-; PPC64-P8-DAG: and [[NEW_HI_TMP:[0-9]+]], [[X_HI]], [[SIGN]]
+; PPC64-P8-DAG: rldicr [[NEW_HI_TMP:[0-9]+]], [[X_HI]], 0, 0
; PPC64-P8-DAG: or 3, [[NEW_HI_TMP]], [[CST_HI]]
; PPC64-P8-DAG: xor 4, [[NEW_HI_TMP]], [[CST_LO]]
; PPC64-P8: blr
@@ -128,7 +120,7 @@ entry:
; PPC32-DAG: oris {{[0-9]+}}, [[FLIP_BIT]], 16399
; PPC32-DAG: xoris {{[0-9]+}}, [[FLIP_BIT]], 48304
; PPC32: blr
- %0 = tail call ppc_fp128 @llvm.copysign.ppcf128(ppc_fp128 0xMBCB0000000000000400F000000000000, ppc_fp128 %x)
+ %0 = tail call ppc_fp128 @llvm.copysign.ppcf128(ppc_fp128 0xM400F000000000000BCB0000000000000, ppc_fp128 %x)
%1 = bitcast ppc_fp128 %0 to i128
ret i128 %1
}
diff --git a/test/CodeGen/PowerPC/i1-ext-fold.ll b/test/CodeGen/PowerPC/i1-ext-fold.ll
index 9a71b7baa66b..877da486bcd0 100644
--- a/test/CodeGen/PowerPC/i1-ext-fold.ll
+++ b/test/CodeGen/PowerPC/i1-ext-fold.ll
@@ -1,4 +1,5 @@
; RUN: llc -verify-machineinstrs -mcpu=pwr7 < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-gen-isel=false < %s | FileCheck --check-prefix=CHECK-NO-ISEL %s
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
@@ -11,11 +12,19 @@ entry:
ret i32 %shl
; CHECK-LABEL: @foo
+; CHECK-NO-ISEL-LABEL: @foo
; CHECK-DAG: cmpw
; CHECK-DAG: li [[REG1:[0-9]+]], 0
; CHECK-DAG: li [[REG2:[0-9]+]], 16
; CHECK: isel 3, [[REG2]], [[REG1]],
; CHECK: blr
+
+; CHECK-NO-ISEL: bc 12, 0, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 5, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL-NEXT: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 12, 0
+; CHECK-NO-ISEL-NEXT: blr
}
; Function Attrs: nounwind readnone
@@ -28,11 +37,19 @@ entry:
ret i32 %add1
; CHECK-LABEL: @foo2
+; CHECK-NO-ISEL-LABEL: @foo2
; CHECK-DAG: cmpw
; CHECK-DAG: li [[REG1:[0-9]+]], 5
; CHECK-DAG: li [[REG2:[0-9]+]], 21
; CHECK: isel 3, [[REG2]], [[REG1]],
; CHECK: blr
+
+; CHECK-NO-ISEL: bc 12, 0, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 5, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL-NEXT: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 12, 0
+; CHECK-NO-ISEL-NEXT: blr
}
; Function Attrs: nounwind readnone
@@ -44,10 +61,18 @@ entry:
ret i32 %shl
; CHECK-LABEL: @foo3
+; CHECK-NO-ISEL-LABEL: @foo3
; CHECK-DAG: cmpw
; CHECK-DAG: li [[REG1:[0-9]+]], 16
; CHECK: isel 3, 0, [[REG1]],
; CHECK: blr
+
+; CHECK-NO-ISEL: bc 12, 1, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 5, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL-NEXT: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 0, 0
+; CHECK-NO-ISEL-NEXT: blr
}
attributes #0 = { nounwind readnone }
diff --git a/test/CodeGen/PowerPC/i1-to-double.ll b/test/CodeGen/PowerPC/i1-to-double.ll
index 4b13388ff460..7871ac7ae05b 100644
--- a/test/CodeGen/PowerPC/i1-to-double.ll
+++ b/test/CodeGen/PowerPC/i1-to-double.ll
@@ -7,15 +7,13 @@ define double @test(i1 %X) {
; CHECK-LABEL: @test
; CHECK: andi. {{[0-9]+}}, 3, 1
-; CHECK: bc 12, 1,
-
-; CHECK: li 3, .LCP[[L1:[A-Z0-9_]+]]@l
-; CHECK: addis 3, 3, .LCP[[L1]]@ha
-; CHECK: lfs 1, 0(3)
-; CHECK: blr
-
-; CHECK: li 3, .LCP[[L2:[A-Z0-9_]+]]@l
-; CHECK: addis 3, 3, .LCP[[L2]]@ha
-; CHECK: lfs 1, 0(3)
-; CHECK: blr
-
+; CHECK-NEXT: addis 4, 4, .LCPI
+; CHECK-NEXT: addis 5, 5, .LCPI
+; CHECK-NEXT: bc 12, 1, [[TRUE:.LBB[0-9]+]]
+; CHECK: ori 3, 4, 0
+; CHECK-NEXT: b [[SUCCESSOR:.LBB[0-9]+]]
+; CHECK-NEXT: [[TRUE]]
+; CHECK-NEXT: addi 3, 5, 0
+; CHECK-NEXT: [[SUCCESSOR]]
+; CHECK-NEXT: lfs 1, 0(3)
+; CHECK-NEXT: blr
diff --git a/test/CodeGen/PowerPC/i64_fp_round.ll b/test/CodeGen/PowerPC/i64_fp_round.ll
index 1e95dfdec71a..9fe7a3bfcbb7 100644
--- a/test/CodeGen/PowerPC/i64_fp_round.ll
+++ b/test/CodeGen/PowerPC/i64_fp_round.ll
@@ -1,4 +1,5 @@
; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=-fpcvt < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=-fpcvt -ppc-gen-isel=false < %s | FileCheck %s --check-prefix=CHECK-NO-ISEL
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
@@ -12,10 +13,20 @@ entry:
; Note that only parts of the sequence are checked for here, to allow
; for minor code generation differences.
+;CHECK-LABEL: test
+;CHECK-NO-ISEL-LABEL: test
; CHECK: sradi [[REG1:[0-9]+]], 3, 53
; CHECK: addi [[REG2:[0-9]+]], [[REG1]], 1
; CHECK: cmpldi [[REG2]], 1
; CHECK: isel [[REG3:[0-9]+]], {{[0-9]+}}, 3, 1
+; CHECK-NO-ISEL: rldicr [[REG2:[0-9]+]], {{[0-9]+}}, 0, 52
+; CHECK-NO-ISEL: bc 12, 1, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori [[REG3:[0-9]+]], 3, 0
+; CHECK-NO-ISEL-NEXT: b [[SUCCESSOR:.LBB[0-9]+]]
+; CHECK-NO-ISEL-NEXT: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi [[REG3]], [[REG2]], 0
+; CHECK-NO-ISEL-NEXT: [[SUCCESSOR]]
+; CHECK-NO-ISEL: std [[REG3]], -{{[0-9]+}}(1)
; CHECK: std [[REG3]], -{{[0-9]+}}(1)
diff --git a/test/CodeGen/PowerPC/ifcvt.ll b/test/CodeGen/PowerPC/ifcvt.ll
index 9c966c95b72d..b9b594a68f12 100644
--- a/test/CodeGen/PowerPC/ifcvt.ll
+++ b/test/CodeGen/PowerPC/ifcvt.ll
@@ -1,4 +1,5 @@
; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -verify-machineinstrs -ppc-gen-isel=false | FileCheck --check-prefix=CHECK-NO-ISEL %s
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
@@ -18,10 +19,18 @@ cond.false: ; preds = %sw.epilog
%add37 = add nsw i32 %conv29, %a
br label %cond.end
-; CHECK: @test
+; CHECK-LABEL: @test
+; CHECK-NO-ISEL-LABEL: @test
; CHECK: add [[REG:[0-9]+]],
; CHECK: subf [[REG2:[0-9]+]],
; CHECK: isel {{[0-9]+}}, [[REG]], [[REG2]],
+; CHECK-NO-ISEL: bc 12, 1, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL-NEXT: b [[SUCCESSOR:.LBB[0-9]+]]
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 5, 6, 0
+; CHECK-NO-ISEL: extsh 5, 5
+; CHECK-NO-ISEL-NEXT: add 3, 3, 5
+; CHECK-NO-ISEL-NEXT: blr
cond.end: ; preds = %cond.false, %cond.true
%cond = phi i32 [ %sub34, %cond.true ], [ %add37, %cond.false ]
diff --git a/test/CodeGen/PowerPC/indirectbr.ll b/test/CodeGen/PowerPC/indirectbr.ll
index d1e03ca7773a..c040d7859a8b 100644
--- a/test/CodeGen/PowerPC/indirectbr.ll
+++ b/test/CodeGen/PowerPC/indirectbr.ll
@@ -17,23 +17,35 @@ entry:
bb2: ; preds = %entry, %bb3
%gotovar.4.0 = phi i8* [ %gotovar.4.0.pre, %bb3 ], [ %0, %entry ] ; <i8*> [#uses=1]
; PIC: mtctr
-; PIC-NEXT: li
-; PIC-NEXT: li
-; PIC-NEXT: li
-; PIC-NEXT: li
; PIC-NEXT: bctr
+; PIC: li
+; PIC: b LBB
+; PIC: li
+; PIC: b LBB
+; PIC: li
+; PIC: b LBB
+; PIC: li
+; PIC: b LBB
; STATIC: mtctr
-; STATIC-NEXT: li
-; STATIC-NEXT: li
-; STATIC-NEXT: li
-; STATIC-NEXT: li
; STATIC-NEXT: bctr
+; STATIC: li
+; STATIC: b LBB
+; STATIC: li
+; STATIC: b LBB
+; STATIC: li
+; STATIC: b LBB
+; STATIC: li
+; STATIC: b LBB
; PPC64: mtctr
-; PPC64-NEXT: li
-; PPC64-NEXT: li
-; PPC64-NEXT: li
-; PPC64-NEXT: li
; PPC64-NEXT: bctr
+; PPC64: li
+; PPC64: b LBB
+; PPC64: li
+; PPC64: b LBB
+; PPC64: li
+; PPC64: b LBB
+; PPC64: li
+; PPC64: b LBB
indirectbr i8* %gotovar.4.0, [label %L5, label %L4, label %L3, label %L2, label %L1]
bb3: ; preds = %entry
diff --git a/test/CodeGen/PowerPC/isel.ll b/test/CodeGen/PowerPC/isel.ll
index 1dc55fcc40b2..c1cceb967018 100644
--- a/test/CodeGen/PowerPC/isel.ll
+++ b/test/CodeGen/PowerPC/isel.ll
@@ -2,14 +2,22 @@ target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
target triple = "powerpc64-unknown-linux-gnu"
; RUN: llc -verify-machineinstrs -mcpu=a2 < %s | FileCheck %s
; RUN: llc -verify-machineinstrs -mcpu=pwr7 < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-gen-isel=false < %s | FileCheck --check-prefix=CHECK-NO-ISEL %s
define i64 @test1(i64 %a, i64 %b, i64 %c, i64 %d) {
entry:
%p = icmp uge i64 %a, %b
%x = select i1 %p, i64 %c, i64 %d
ret i64 %x
-; CHECK: @test1
+; CHECK-LABEL: @test1
+; CHECK-NO-ISEL-LABEL: @test1
; CHECK: isel
+; CHECK-NO-ISEL: bc 12, 0, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 5, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 6, 0
+; CHECK-NO-ISEL-NEXT: blr
}
define i32 @test2(i32 %a, i32 %b, i32 %c, i32 %d) {
@@ -17,7 +25,14 @@ entry:
%p = icmp uge i32 %a, %b
%x = select i1 %p, i32 %c, i32 %d
ret i32 %x
-; CHECK: @test2
+; CHECK-LABEL: @test2
+; CHECK-NO-ISEL-LABEL: @test2
; CHECK: isel
+; CHECK-NO-ISEL: bc 12, 0, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 5, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 6, 0
+; CHECK-NO-ISEL-NEXT: blr
}
diff --git a/test/CodeGen/PowerPC/jaggedstructs.ll b/test/CodeGen/PowerPC/jaggedstructs.ll
index b28b34d7814f..6128316f45fa 100644
--- a/test/CodeGen/PowerPC/jaggedstructs.ll
+++ b/test/CodeGen/PowerPC/jaggedstructs.ll
@@ -18,31 +18,31 @@ entry:
ret void
}
-; CHECK: std 6, 184(1)
-; CHECK: std 5, 176(1)
-; CHECK: std 4, 168(1)
-; CHECK: std 3, 160(1)
-; CHECK: lbz {{[0-9]+}}, 167(1)
-; CHECK: lhz {{[0-9]+}}, 165(1)
-; CHECK: stb {{[0-9]+}}, 55(1)
-; CHECK: sth {{[0-9]+}}, 53(1)
-; CHECK: lbz {{[0-9]+}}, 175(1)
-; CHECK: lwz {{[0-9]+}}, 171(1)
-; CHECK: stb {{[0-9]+}}, 63(1)
-; CHECK: stw {{[0-9]+}}, 59(1)
-; CHECK: lhz {{[0-9]+}}, 182(1)
-; CHECK: lwz {{[0-9]+}}, 178(1)
-; CHECK: sth {{[0-9]+}}, 70(1)
-; CHECK: stw {{[0-9]+}}, 66(1)
-; CHECK: lbz {{[0-9]+}}, 191(1)
-; CHECK: lhz {{[0-9]+}}, 189(1)
-; CHECK: lwz {{[0-9]+}}, 185(1)
-; CHECK: stb {{[0-9]+}}, 79(1)
-; CHECK: sth {{[0-9]+}}, 77(1)
-; CHECK: stw {{[0-9]+}}, 73(1)
-; CHECK: ld 6, 72(1)
-; CHECK: ld 5, 64(1)
-; CHECK: ld 4, 56(1)
-; CHECK: ld 3, 48(1)
+; CHECK-DAG: std 3, 160(1)
+; CHECK-DAG: std 6, 184(1)
+; CHECK-DAG: std 5, 176(1)
+; CHECK-DAG: std 4, 168(1)
+; CHECK-DAG: lbz {{[0-9]+}}, 167(1)
+; CHECK-DAG: lhz {{[0-9]+}}, 165(1)
+; CHECK-DAG: stb {{[0-9]+}}, 55(1)
+; CHECK-DAG: sth {{[0-9]+}}, 53(1)
+; CHECK-DAG: lbz {{[0-9]+}}, 175(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 171(1)
+; CHECK-DAG: stb {{[0-9]+}}, 63(1)
+; CHECK-DAG: stw {{[0-9]+}}, 59(1)
+; CHECK-DAG: lhz {{[0-9]+}}, 182(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 178(1)
+; CHECK-DAG: sth {{[0-9]+}}, 70(1)
+; CHECK-DAG: stw {{[0-9]+}}, 66(1)
+; CHECK-DAG: lbz {{[0-9]+}}, 191(1)
+; CHECK-DAG: lhz {{[0-9]+}}, 189(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 185(1)
+; CHECK-DAG: stb {{[0-9]+}}, 79(1)
+; CHECK-DAG: sth {{[0-9]+}}, 77(1)
+; CHECK-DAG: stw {{[0-9]+}}, 73(1)
+; CHECK-DAG: ld 6, 72(1)
+; CHECK-DAG: ld 5, 64(1)
+; CHECK-DAG: ld 4, 56(1)
+; CHECK-DAG: ld 3, 48(1)
declare void @check(%struct.S3* byval, %struct.S5* byval, %struct.S6* byval, %struct.S7* byval)
diff --git a/test/CodeGen/PowerPC/lsa.ll b/test/CodeGen/PowerPC/lsa.ll
index dc74b9dbca22..d0ebd473133c 100644
--- a/test/CodeGen/PowerPC/lsa.ll
+++ b/test/CodeGen/PowerPC/lsa.ll
@@ -8,11 +8,11 @@ entry:
%w = alloca [8200 x i32], align 4
%q = alloca [8200 x i32], align 4
%0 = bitcast [8200 x i32]* %v to i8*
- call void @llvm.lifetime.start(i64 32800, i8* %0) #0
+ call void @llvm.lifetime.start.p0i8(i64 32800, i8* %0) #0
%1 = bitcast [8200 x i32]* %w to i8*
- call void @llvm.lifetime.start(i64 32800, i8* %1) #0
+ call void @llvm.lifetime.start.p0i8(i64 32800, i8* %1) #0
%2 = bitcast [8200 x i32]* %q to i8*
- call void @llvm.lifetime.start(i64 32800, i8* %2) #0
+ call void @llvm.lifetime.start.p0i8(i64 32800, i8* %2) #0
%arraydecay = getelementptr inbounds [8200 x i32], [8200 x i32]* %q, i64 0, i64 0
%arraydecay1 = getelementptr inbounds [8200 x i32], [8200 x i32]* %v, i64 0, i64 0
%arraydecay2 = getelementptr inbounds [8200 x i32], [8200 x i32]* %w, i64 0, i64 0
@@ -28,16 +28,16 @@ entry:
; CHECK: blr
%add = add nsw i32 %4, %3
- call void @llvm.lifetime.end(i64 32800, i8* %2) #0
- call void @llvm.lifetime.end(i64 32800, i8* %1) #0
- call void @llvm.lifetime.end(i64 32800, i8* %0) #0
+ call void @llvm.lifetime.end.p0i8(i64 32800, i8* %2) #0
+ call void @llvm.lifetime.end.p0i8(i64 32800, i8* %1) #0
+ call void @llvm.lifetime.end.p0i8(i64 32800, i8* %0) #0
ret i32 %add
}
-declare void @llvm.lifetime.start(i64, i8* nocapture) #0
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #0
declare void @bar(i32*, i32*, i32*)
-declare void @llvm.lifetime.end(i64, i8* nocapture) #0
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #0
attributes #0 = { nounwind }
diff --git a/test/CodeGen/PowerPC/mature-mc-support.ll b/test/CodeGen/PowerPC/mature-mc-support.ll
index aa387f6e2666..543877d60cfa 100644
--- a/test/CodeGen/PowerPC/mature-mc-support.ll
+++ b/test/CodeGen/PowerPC/mature-mc-support.ll
@@ -28,4 +28,4 @@
module asm " .this_directive_is_very_unlikely_to_exist"
-; CHECK: LLVM ERROR: Error parsing inline asm
+; CHECK: error: unknown directive
diff --git a/test/CodeGen/PowerPC/mcm-obj.ll b/test/CodeGen/PowerPC/mcm-obj.ll
index 6b5b0c2b7425..fa899b5b3016 100644
--- a/test/CodeGen/PowerPC/mcm-obj.ll
+++ b/test/CodeGen/PowerPC/mcm-obj.ll
@@ -108,11 +108,10 @@ entry:
ret i32 %0
}
-; Verify generation of R_PPC64_TOC16_HA and R_PPC64_TOC16_LO_DS for
-; accessing tentatively declared variable ti.
+; Verify generation of relocations for accessing variable ti.
;
; MEDIUM-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_HA [[SYM6:[^ ]+]]
-; MEDIUM-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO_DS [[SYM6]]
+; MEDIUM-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO [[SYM6]]
;
; LARGE-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_HA [[SYM6:[^ ]+]]
; LARGE-NEXT: 0x{{[0-9,A-F]+}} R_PPC64_TOC16_LO_DS [[SYM6]]
diff --git a/test/CodeGen/PowerPC/misched-inorder-latency.ll b/test/CodeGen/PowerPC/misched-inorder-latency.ll
index ded3111da977..26663d81f357 100644
--- a/test/CodeGen/PowerPC/misched-inorder-latency.ll
+++ b/test/CodeGen/PowerPC/misched-inorder-latency.ll
@@ -17,7 +17,7 @@ entry:
%sum1 = add i32 %sumin, 1
%val1 = load i32, i32* %ptr
%p = icmp eq i32 %sumin, 0
- br i1 %p, label %true, label %end
+ br i1 %p, label %true, label %end, !prof !1
true:
%sum2 = add i32 %sum1, 1
%ptr2 = getelementptr i32, i32* %ptr, i32 1
@@ -53,3 +53,5 @@ end:
ret i32 %valmerge
}
declare void @llvm.prefetch(i8*, i32, i32, i32) nounwind
+
+!1 = !{!"branch_weights", i32 2, i32 1}
diff --git a/test/CodeGen/PowerPC/optcmp.ll b/test/CodeGen/PowerPC/optcmp.ll
index 5e8ca5a6a678..a1921452d620 100644
--- a/test/CodeGen/PowerPC/optcmp.ll
+++ b/test/CodeGen/PowerPC/optcmp.ll
@@ -1,4 +1,5 @@
; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=a2 -mattr=-crbits -disable-ppc-cmp-opt=0 | FileCheck %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=a2 -mattr=-crbits -disable-ppc-cmp-opt=0 -ppc-gen-isel=false | FileCheck --check-prefix=CHECK-NO-ISEL %s
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-f128:128:128-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
@@ -34,9 +35,14 @@ entry:
%cond = select i1 %cmp, i64 %a, i64 %b
ret i64 %cond
-; CHECK: @fool
+; CHECK-LABEL: @fool
+; CHECK-NO-ISEL-LABEL: @fool
; CHECK: subf. [[REG:[0-9]+]], 4, 3
; CHECK: isel 3, 3, 4, 1
+; CHECK-NO-ISEL: bc 12, 1, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 4, 0
+; CHECK-NO-ISEL: b [[SUCCESSOR:.LBB[0-9]+]]
+
; CHECK: std [[REG]], 0(5)
}
@@ -48,9 +54,13 @@ entry:
%cond = select i1 %cmp, i64 %a, i64 %b
ret i64 %cond
-; CHECK: @foolb
+; CHECK-LABEL: @foolb
+; CHECK-NO-ISEL-LABEL: @foolb
; CHECK: subf. [[REG:[0-9]+]], 4, 3
; CHECK: isel 3, 4, 3, 1
+; CHECK-NO-ISEL: bc 12, 1, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL-NEXT: b .LBB
+; CHECK-NO-ISEL: addi 3, 4, 0
; CHECK: std [[REG]], 0(5)
}
@@ -62,9 +72,13 @@ entry:
%cond = select i1 %cmp, i64 %a, i64 %b
ret i64 %cond
-; CHECK: @foolc
+; CHECK-LABEL: @foolc
+; CHECK-NO-ISEL-LABEL: @foolc
; CHECK: subf. [[REG:[0-9]+]], 3, 4
; CHECK: isel 3, 3, 4, 0
+; CHECK-NO-ISEL: bc 12, 0, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 4, 0
+; CHECK-NO-ISEL: b [[SUCCESSOR:.LBB[0-9]+]]
; CHECK: std [[REG]], 0(5)
}
@@ -76,9 +90,13 @@ entry:
%cond = select i1 %cmp, i64 %a, i64 %b
ret i64 %cond
-; CHECK: @foold
+; CHECK-LABEL: @foold
+; CHECK-NO-ISEL-LABEL: @foold
; CHECK: subf. [[REG:[0-9]+]], 3, 4
; CHECK: isel 3, 3, 4, 1
+; CHECK-NO-ISEL: bc 12, 1, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 4, 0
+; CHECK-NO-ISEL: b [[SUCCESSOR:.LBB[0-9]+]]
; CHECK: std [[REG]], 0(5)
}
@@ -90,9 +108,13 @@ entry:
%cond = select i1 %cmp, i64 %a, i64 %b
ret i64 %cond
-; CHECK: @foold2
+; CHECK-LABEL: @foold2
+; CHECK-NO-ISEL-LABEL: @foold2
; CHECK: subf. [[REG:[0-9]+]], 4, 3
; CHECK: isel 3, 3, 4, 0
+; CHECK-NO-ISEL: bc 12, 0, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 4, 0
+; CHECK-NO-ISEL: b [[SUCCESSOR:.LBB[0-9]+]]
; CHECK: std [[REG]], 0(5)
}
diff --git a/test/CodeGen/PowerPC/p8-isel-sched.ll b/test/CodeGen/PowerPC/p8-isel-sched.ll
index 6fa5616dd42a..b45a123f0276 100644
--- a/test/CodeGen/PowerPC/p8-isel-sched.ll
+++ b/test/CodeGen/PowerPC/p8-isel-sched.ll
@@ -1,4 +1,5 @@
; RUN: llc -verify-machineinstrs -mcpu=pwr8 < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mcpu=pwr8 -ppc-gen-isel=false < %s | FileCheck --check-prefix=CHECK-NO-ISEL %s
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
@@ -24,10 +25,20 @@ entry:
; Make sure that we don't schedule all of the isels together, they should be
; intermixed with the adds because each isel starts a new dispatch group.
; CHECK-LABEL: @foo
+; CHECK-NO-ISEL-LABEL: @foo
; CHECK: isel
+; CHECK-NO-ISEL: bc 12, 2, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 7, 12, 0
+; CHECK-NO-ISEL-NEXT: b [[SUCCESSOR:.LBB[0-9]+]]
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 7, 11, 0
; CHECK: addi
; CHECK: isel
+; CHECK-NO-ISEL: bc 12, 2, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 10, 11, 0
+; CHECK-NO-ISEL-NEXT: b [[SUCCESSOR:.LBB[0-9]+]]
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 10, 12, 0
; CHECK: blr
attributes #0 = { nounwind }
-
diff --git a/test/CodeGen/PowerPC/p8-scalar_vector_conversions.ll b/test/CodeGen/PowerPC/p8-scalar_vector_conversions.ll
index 1f317992a3b7..f399b2584d0b 100644
--- a/test/CodeGen/PowerPC/p8-scalar_vector_conversions.ll
+++ b/test/CodeGen/PowerPC/p8-scalar_vector_conversions.ll
@@ -7,13 +7,10 @@
@d = common global double 0.000000e+00, align 8
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define <16 x i8> @buildc(i8 zeroext %a) {
entry:
- %a.addr = alloca i8, align 1
- store i8 %a, i8* %a.addr, align 1
- %0 = load i8, i8* %a.addr, align 1
- %splat.splatinsert = insertelement <16 x i8> undef, i8 %0, i32 0
+ %splat.splatinsert = insertelement <16 x i8> undef, i8 %a, i32 0
%splat.splat = shufflevector <16 x i8> %splat.splatinsert, <16 x i8> undef, <16 x i32> zeroinitializer
ret <16 x i8> %splat.splat
; CHECK: sldi [[REG1:[0-9]+]], 3, 56
@@ -22,13 +19,10 @@ entry:
; CHECK-LE: xxswapd {{[0-9]+}}, [[REG1]]
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define <8 x i16> @builds(i16 zeroext %a) {
entry:
- %a.addr = alloca i16, align 2
- store i16 %a, i16* %a.addr, align 2
- %0 = load i16, i16* %a.addr, align 2
- %splat.splatinsert = insertelement <8 x i16> undef, i16 %0, i32 0
+ %splat.splatinsert = insertelement <8 x i16> undef, i16 %a, i32 0
%splat.splat = shufflevector <8 x i16> %splat.splatinsert, <8 x i16> undef, <8 x i32> zeroinitializer
ret <8 x i16> %splat.splat
; CHECK: sldi [[REG1:[0-9]+]], 3, 48
@@ -37,13 +31,10 @@ entry:
; CHECK-LE: xxswapd {{[0-9]+}}, [[REG1]]
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define <4 x i32> @buildi(i32 zeroext %a) {
entry:
- %a.addr = alloca i32, align 4
- store i32 %a, i32* %a.addr, align 4
- %0 = load i32, i32* %a.addr, align 4
- %splat.splatinsert = insertelement <4 x i32> undef, i32 %0, i32 0
+ %splat.splatinsert = insertelement <4 x i32> undef, i32 %a, i32 0
%splat.splat = shufflevector <4 x i32> %splat.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
ret <4 x i32> %splat.splat
; CHECK: mtvsrwz [[REG1:[0-9]+]], 3
@@ -52,13 +43,10 @@ entry:
; CHECK-LE: xxspltw 34, [[REG1]]
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define <2 x i64> @buildl(i64 %a) {
entry:
- %a.addr = alloca i64, align 8
- store i64 %a, i64* %a.addr, align 8
- %0 = load i64, i64* %a.addr, align 8
- %splat.splatinsert = insertelement <2 x i64> undef, i64 %0, i32 0
+ %splat.splatinsert = insertelement <2 x i64> undef, i64 %a, i32 0
%splat.splat = shufflevector <2 x i64> %splat.splatinsert, <2 x i64> undef, <2 x i32> zeroinitializer
ret <2 x i64> %splat.splat
; CHECK: mtvsrd {{[0-9]+}}, 3
@@ -66,13 +54,10 @@ entry:
; CHECK-LE: xxspltd 34, [[REG1]], 0
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define <4 x float> @buildf(float %a) {
entry:
- %a.addr = alloca float, align 4
- store float %a, float* %a.addr, align 4
- %0 = load float, float* %a.addr, align 4
- %splat.splatinsert = insertelement <4 x float> undef, float %0, i32 0
+ %splat.splatinsert = insertelement <4 x float> undef, float %a, i32 0
%splat.splat = shufflevector <4 x float> %splat.splatinsert, <4 x float> undef, <4 x i32> zeroinitializer
ret <4 x float> %splat.splat
; CHECK: xscvdpspn [[REG1:[0-9]+]], 1
@@ -83,8 +68,8 @@ entry:
; The optimization to remove stack operations from PPCDAGToDAGISel::Select
; should still trigger for v2f64, producing an lxvdsx.
-; Function Attrs: nounwind
-define <2 x double> @buildd() #0 {
+; Function Attrs: norecurse nounwind readonly
+define <2 x double> @buildd() {
entry:
%0 = load double, double* @d, align 8
%splat.splatinsert = insertelement <2 x double> undef, double %0, i32 0
@@ -96,13 +81,10 @@ entry:
; CHECK-LE: lxvdsx 34, 0, [[REG1]]
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i8 @getsc0(<16 x i8> %vsc) {
entry:
- %vsc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vsc, <16 x i8>* %vsc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vsc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 0
+ %vecext = extractelement <16 x i8> %vsc, i32 0
ret i8 %vecext
; CHECK-LABEL: @getsc0
; CHECK: mfvsrd 3, 34
@@ -114,13 +96,10 @@ entry:
; CHECK-LE: extsb 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i8 @getsc1(<16 x i8> %vsc) {
entry:
- %vsc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vsc, <16 x i8>* %vsc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vsc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 1
+ %vecext = extractelement <16 x i8> %vsc, i32 1
ret i8 %vecext
; CHECK-LABEL: @getsc1
; CHECK: mfvsrd 3, 34
@@ -132,13 +111,10 @@ entry:
; CHECK-LE: extsb 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i8 @getsc2(<16 x i8> %vsc) {
entry:
- %vsc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vsc, <16 x i8>* %vsc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vsc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 2
+ %vecext = extractelement <16 x i8> %vsc, i32 2
ret i8 %vecext
; CHECK-LABEL: @getsc2
; CHECK: mfvsrd 3, 34
@@ -150,13 +126,10 @@ entry:
; CHECK-LE: extsb 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i8 @getsc3(<16 x i8> %vsc) {
entry:
- %vsc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vsc, <16 x i8>* %vsc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vsc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 3
+ %vecext = extractelement <16 x i8> %vsc, i32 3
ret i8 %vecext
; CHECK-LABEL: @getsc3
; CHECK: mfvsrd 3, 34
@@ -168,13 +141,10 @@ entry:
; CHECK-LE: extsb 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i8 @getsc4(<16 x i8> %vsc) {
entry:
- %vsc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vsc, <16 x i8>* %vsc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vsc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 4
+ %vecext = extractelement <16 x i8> %vsc, i32 4
ret i8 %vecext
; CHECK-LABEL: @getsc4
; CHECK: mfvsrd 3, 34
@@ -186,13 +156,10 @@ entry:
; CHECK-LE: extsb 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i8 @getsc5(<16 x i8> %vsc) {
entry:
- %vsc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vsc, <16 x i8>* %vsc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vsc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 5
+ %vecext = extractelement <16 x i8> %vsc, i32 5
ret i8 %vecext
; CHECK-LABEL: @getsc5
; CHECK: mfvsrd 3, 34
@@ -204,13 +171,10 @@ entry:
; CHECK-LE: extsb 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i8 @getsc6(<16 x i8> %vsc) {
entry:
- %vsc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vsc, <16 x i8>* %vsc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vsc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 6
+ %vecext = extractelement <16 x i8> %vsc, i32 6
ret i8 %vecext
; CHECK-LABEL: @getsc6
; CHECK: mfvsrd 3, 34
@@ -222,13 +186,10 @@ entry:
; CHECK-LE: extsb 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i8 @getsc7(<16 x i8> %vsc) {
entry:
- %vsc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vsc, <16 x i8>* %vsc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vsc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 7
+ %vecext = extractelement <16 x i8> %vsc, i32 7
ret i8 %vecext
; CHECK-LABEL: @getsc7
; CHECK: mfvsrd 3, 34
@@ -240,13 +201,10 @@ entry:
; CHECK-LE: extsb 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i8 @getsc8(<16 x i8> %vsc) {
entry:
- %vsc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vsc, <16 x i8>* %vsc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vsc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 8
+ %vecext = extractelement <16 x i8> %vsc, i32 8
ret i8 %vecext
; CHECK-LABEL: @getsc8
; CHECK: mfvsrd 3,
@@ -258,13 +216,10 @@ entry:
; CHECK-LE: extsb 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i8 @getsc9(<16 x i8> %vsc) {
entry:
- %vsc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vsc, <16 x i8>* %vsc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vsc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 9
+ %vecext = extractelement <16 x i8> %vsc, i32 9
ret i8 %vecext
; CHECK-LABEL: @getsc9
; CHECK: mfvsrd 3,
@@ -276,13 +231,10 @@ entry:
; CHECK-LE: extsb 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i8 @getsc10(<16 x i8> %vsc) {
entry:
- %vsc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vsc, <16 x i8>* %vsc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vsc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 10
+ %vecext = extractelement <16 x i8> %vsc, i32 10
ret i8 %vecext
; CHECK-LABEL: @getsc10
; CHECK: mfvsrd 3,
@@ -294,13 +246,10 @@ entry:
; CHECK-LE: extsb 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i8 @getsc11(<16 x i8> %vsc) {
entry:
- %vsc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vsc, <16 x i8>* %vsc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vsc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 11
+ %vecext = extractelement <16 x i8> %vsc, i32 11
ret i8 %vecext
; CHECK-LABEL: @getsc11
; CHECK: mfvsrd 3,
@@ -312,13 +261,10 @@ entry:
; CHECK-LE: extsb 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i8 @getsc12(<16 x i8> %vsc) {
entry:
- %vsc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vsc, <16 x i8>* %vsc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vsc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 12
+ %vecext = extractelement <16 x i8> %vsc, i32 12
ret i8 %vecext
; CHECK-LABEL: @getsc12
; CHECK: mfvsrd 3,
@@ -330,13 +276,10 @@ entry:
; CHECK-LE: extsb 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i8 @getsc13(<16 x i8> %vsc) {
entry:
- %vsc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vsc, <16 x i8>* %vsc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vsc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 13
+ %vecext = extractelement <16 x i8> %vsc, i32 13
ret i8 %vecext
; CHECK-LABEL: @getsc13
; CHECK: mfvsrd 3,
@@ -348,13 +291,10 @@ entry:
; CHECK-LE: extsb 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i8 @getsc14(<16 x i8> %vsc) {
entry:
- %vsc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vsc, <16 x i8>* %vsc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vsc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 14
+ %vecext = extractelement <16 x i8> %vsc, i32 14
ret i8 %vecext
; CHECK-LABEL: @getsc14
; CHECK: mfvsrd 3,
@@ -366,13 +306,10 @@ entry:
; CHECK-LE: extsb 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i8 @getsc15(<16 x i8> %vsc) {
entry:
- %vsc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vsc, <16 x i8>* %vsc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vsc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 15
+ %vecext = extractelement <16 x i8> %vsc, i32 15
ret i8 %vecext
; CHECK-LABEL: @getsc15
; CHECK: mfvsrd 3,
@@ -383,13 +320,10 @@ entry:
; CHECK-LE: extsb 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i8 @getuc0(<16 x i8> %vuc) {
entry:
- %vuc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vuc, <16 x i8>* %vuc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vuc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 0
+ %vecext = extractelement <16 x i8> %vuc, i32 0
ret i8 %vecext
; CHECK-LABEL: @getuc0
; CHECK: mfvsrd 3, 34
@@ -400,13 +334,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 56
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i8 @getuc1(<16 x i8> %vuc) {
entry:
- %vuc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vuc, <16 x i8>* %vuc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vuc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 1
+ %vecext = extractelement <16 x i8> %vuc, i32 1
ret i8 %vecext
; CHECK-LABEL: @getuc1
; CHECK: mfvsrd 3, 34
@@ -418,13 +349,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 56
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i8 @getuc2(<16 x i8> %vuc) {
entry:
- %vuc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vuc, <16 x i8>* %vuc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vuc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 2
+ %vecext = extractelement <16 x i8> %vuc, i32 2
ret i8 %vecext
; CHECK-LABEL: @getuc2
; CHECK: mfvsrd 3, 34
@@ -436,13 +364,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 56
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i8 @getuc3(<16 x i8> %vuc) {
entry:
- %vuc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vuc, <16 x i8>* %vuc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vuc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 3
+ %vecext = extractelement <16 x i8> %vuc, i32 3
ret i8 %vecext
; CHECK-LABEL: @getuc3
; CHECK: mfvsrd 3, 34
@@ -454,13 +379,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 56
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i8 @getuc4(<16 x i8> %vuc) {
entry:
- %vuc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vuc, <16 x i8>* %vuc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vuc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 4
+ %vecext = extractelement <16 x i8> %vuc, i32 4
ret i8 %vecext
; CHECK-LABEL: @getuc4
; CHECK: mfvsrd 3, 34
@@ -472,13 +394,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 56
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i8 @getuc5(<16 x i8> %vuc) {
entry:
- %vuc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vuc, <16 x i8>* %vuc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vuc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 5
+ %vecext = extractelement <16 x i8> %vuc, i32 5
ret i8 %vecext
; CHECK-LABEL: @getuc5
; CHECK: mfvsrd 3, 34
@@ -490,13 +409,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 56
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i8 @getuc6(<16 x i8> %vuc) {
entry:
- %vuc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vuc, <16 x i8>* %vuc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vuc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 6
+ %vecext = extractelement <16 x i8> %vuc, i32 6
ret i8 %vecext
; CHECK-LABEL: @getuc6
; CHECK: mfvsrd 3, 34
@@ -508,13 +424,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 56
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i8 @getuc7(<16 x i8> %vuc) {
entry:
- %vuc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vuc, <16 x i8>* %vuc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vuc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 7
+ %vecext = extractelement <16 x i8> %vuc, i32 7
ret i8 %vecext
; CHECK-LABEL: @getuc7
; CHECK: mfvsrd 3, 34
@@ -525,13 +438,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 56
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i8 @getuc8(<16 x i8> %vuc) {
entry:
- %vuc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vuc, <16 x i8>* %vuc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vuc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 8
+ %vecext = extractelement <16 x i8> %vuc, i32 8
ret i8 %vecext
; CHECK-LABEL: @getuc8
; CHECK: mfvsrd 3,
@@ -542,13 +452,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 56
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i8 @getuc9(<16 x i8> %vuc) {
entry:
- %vuc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vuc, <16 x i8>* %vuc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vuc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 9
+ %vecext = extractelement <16 x i8> %vuc, i32 9
ret i8 %vecext
; CHECK-LABEL: @getuc9
; CHECK: mfvsrd 3,
@@ -560,13 +467,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 56
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i8 @getuc10(<16 x i8> %vuc) {
entry:
- %vuc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vuc, <16 x i8>* %vuc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vuc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 10
+ %vecext = extractelement <16 x i8> %vuc, i32 10
ret i8 %vecext
; CHECK-LABEL: @getuc10
; CHECK: mfvsrd 3,
@@ -578,13 +482,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 56
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i8 @getuc11(<16 x i8> %vuc) {
entry:
- %vuc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vuc, <16 x i8>* %vuc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vuc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 11
+ %vecext = extractelement <16 x i8> %vuc, i32 11
ret i8 %vecext
; CHECK-LABEL: @getuc11
; CHECK: mfvsrd 3,
@@ -596,13 +497,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 56
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i8 @getuc12(<16 x i8> %vuc) {
entry:
- %vuc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vuc, <16 x i8>* %vuc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vuc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 12
+ %vecext = extractelement <16 x i8> %vuc, i32 12
ret i8 %vecext
; CHECK-LABEL: @getuc12
; CHECK: mfvsrd 3,
@@ -614,13 +512,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 56
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i8 @getuc13(<16 x i8> %vuc) {
entry:
- %vuc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vuc, <16 x i8>* %vuc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vuc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 13
+ %vecext = extractelement <16 x i8> %vuc, i32 13
ret i8 %vecext
; CHECK-LABEL: @getuc13
; CHECK: mfvsrd 3,
@@ -632,13 +527,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 56
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i8 @getuc14(<16 x i8> %vuc) {
entry:
- %vuc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vuc, <16 x i8>* %vuc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vuc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 14
+ %vecext = extractelement <16 x i8> %vuc, i32 14
ret i8 %vecext
; CHECK-LABEL: @getuc14
; CHECK: mfvsrd 3,
@@ -650,13 +542,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 56
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i8 @getuc15(<16 x i8> %vuc) {
entry:
- %vuc.addr = alloca <16 x i8>, align 16
- store <16 x i8> %vuc, <16 x i8>* %vuc.addr, align 16
- %0 = load <16 x i8>, <16 x i8>* %vuc.addr, align 16
- %vecext = extractelement <16 x i8> %0, i32 15
+ %vecext = extractelement <16 x i8> %vuc, i32 15
ret i8 %vecext
; CHECK-LABEL: @getuc15
; CHECK: mfvsrd 3,
@@ -667,16 +556,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 56
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i8 @getvelsc(<16 x i8> %vsc, i32 signext %i) {
entry:
- %vsc.addr = alloca <16 x i8>, align 16
- %i.addr = alloca i32, align 4
- store <16 x i8> %vsc, <16 x i8>* %vsc.addr, align 16
- store i32 %i, i32* %i.addr, align 4
- %0 = load <16 x i8>, <16 x i8>* %vsc.addr, align 16
- %1 = load i32, i32* %i.addr, align 4
- %vecext = extractelement <16 x i8> %0, i32 %1
+ %vecext = extractelement <16 x i8> %vsc, i32 %i
ret i8 %vecext
; CHECK-LABEL: @getvelsc
; CHECK-DAG: andi. [[ANDI:[0-9]+]], {{[0-9]+}}, 8
@@ -701,16 +584,10 @@ entry:
; CHECK-DAG-LE: extsb 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i8 @getveluc(<16 x i8> %vuc, i32 signext %i) {
entry:
- %vuc.addr = alloca <16 x i8>, align 16
- %i.addr = alloca i32, align 4
- store <16 x i8> %vuc, <16 x i8>* %vuc.addr, align 16
- store i32 %i, i32* %i.addr, align 4
- %0 = load <16 x i8>, <16 x i8>* %vuc.addr, align 16
- %1 = load i32, i32* %i.addr, align 4
- %vecext = extractelement <16 x i8> %0, i32 %1
+ %vecext = extractelement <16 x i8> %vuc, i32 %i
ret i8 %vecext
; CHECK-LABEL: @getveluc
; CHECK-DAG: andi. [[ANDI:[0-9]+]], {{[0-9]+}}, 8
@@ -735,13 +612,10 @@ entry:
; CHECK-DAG-LE: clrldi 3, 3, 56
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i16 @getss0(<8 x i16> %vss) {
entry:
- %vss.addr = alloca <8 x i16>, align 16
- store <8 x i16> %vss, <8 x i16>* %vss.addr, align 16
- %0 = load <8 x i16>, <8 x i16>* %vss.addr, align 16
- %vecext = extractelement <8 x i16> %0, i32 0
+ %vecext = extractelement <8 x i16> %vss, i32 0
ret i16 %vecext
; CHECK-LABEL: @getss0
; CHECK: mfvsrd 3, 34
@@ -753,13 +627,10 @@ entry:
; CHECK-LE: extsh 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i16 @getss1(<8 x i16> %vss) {
entry:
- %vss.addr = alloca <8 x i16>, align 16
- store <8 x i16> %vss, <8 x i16>* %vss.addr, align 16
- %0 = load <8 x i16>, <8 x i16>* %vss.addr, align 16
- %vecext = extractelement <8 x i16> %0, i32 1
+ %vecext = extractelement <8 x i16> %vss, i32 1
ret i16 %vecext
; CHECK-LABEL: @getss1
; CHECK: mfvsrd 3, 34
@@ -771,13 +642,10 @@ entry:
; CHECK-LE: extsh 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i16 @getss2(<8 x i16> %vss) {
entry:
- %vss.addr = alloca <8 x i16>, align 16
- store <8 x i16> %vss, <8 x i16>* %vss.addr, align 16
- %0 = load <8 x i16>, <8 x i16>* %vss.addr, align 16
- %vecext = extractelement <8 x i16> %0, i32 2
+ %vecext = extractelement <8 x i16> %vss, i32 2
ret i16 %vecext
; CHECK-LABEL: @getss2
; CHECK: mfvsrd 3, 34
@@ -789,13 +657,10 @@ entry:
; CHECK-LE: extsh 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i16 @getss3(<8 x i16> %vss) {
entry:
- %vss.addr = alloca <8 x i16>, align 16
- store <8 x i16> %vss, <8 x i16>* %vss.addr, align 16
- %0 = load <8 x i16>, <8 x i16>* %vss.addr, align 16
- %vecext = extractelement <8 x i16> %0, i32 3
+ %vecext = extractelement <8 x i16> %vss, i32 3
ret i16 %vecext
; CHECK-LABEL: @getss3
; CHECK: mfvsrd 3, 34
@@ -807,13 +672,10 @@ entry:
; CHECK-LE: extsh 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i16 @getss4(<8 x i16> %vss) {
entry:
- %vss.addr = alloca <8 x i16>, align 16
- store <8 x i16> %vss, <8 x i16>* %vss.addr, align 16
- %0 = load <8 x i16>, <8 x i16>* %vss.addr, align 16
- %vecext = extractelement <8 x i16> %0, i32 4
+ %vecext = extractelement <8 x i16> %vss, i32 4
ret i16 %vecext
; CHECK-LABEL: @getss4
; CHECK: mfvsrd 3,
@@ -825,13 +687,10 @@ entry:
; CHECK-LE: extsh 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i16 @getss5(<8 x i16> %vss) {
entry:
- %vss.addr = alloca <8 x i16>, align 16
- store <8 x i16> %vss, <8 x i16>* %vss.addr, align 16
- %0 = load <8 x i16>, <8 x i16>* %vss.addr, align 16
- %vecext = extractelement <8 x i16> %0, i32 5
+ %vecext = extractelement <8 x i16> %vss, i32 5
ret i16 %vecext
; CHECK-LABEL: @getss5
; CHECK: mfvsrd 3,
@@ -843,13 +702,10 @@ entry:
; CHECK-LE: extsh 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i16 @getss6(<8 x i16> %vss) {
entry:
- %vss.addr = alloca <8 x i16>, align 16
- store <8 x i16> %vss, <8 x i16>* %vss.addr, align 16
- %0 = load <8 x i16>, <8 x i16>* %vss.addr, align 16
- %vecext = extractelement <8 x i16> %0, i32 6
+ %vecext = extractelement <8 x i16> %vss, i32 6
ret i16 %vecext
; CHECK-LABEL: @getss6
; CHECK: mfvsrd 3,
@@ -861,13 +717,10 @@ entry:
; CHECK-LE: extsh 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i16 @getss7(<8 x i16> %vss) {
entry:
- %vss.addr = alloca <8 x i16>, align 16
- store <8 x i16> %vss, <8 x i16>* %vss.addr, align 16
- %0 = load <8 x i16>, <8 x i16>* %vss.addr, align 16
- %vecext = extractelement <8 x i16> %0, i32 7
+ %vecext = extractelement <8 x i16> %vss, i32 7
ret i16 %vecext
; CHECK-LABEL: @getss7
; CHECK: mfvsrd 3,
@@ -878,13 +731,10 @@ entry:
; CHECK-LE: extsh 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i16 @getus0(<8 x i16> %vus) {
entry:
- %vus.addr = alloca <8 x i16>, align 16
- store <8 x i16> %vus, <8 x i16>* %vus.addr, align 16
- %0 = load <8 x i16>, <8 x i16>* %vus.addr, align 16
- %vecext = extractelement <8 x i16> %0, i32 0
+ %vecext = extractelement <8 x i16> %vus, i32 0
ret i16 %vecext
; CHECK-LABEL: @getus0
; CHECK: mfvsrd 3, 34
@@ -895,13 +745,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 48
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i16 @getus1(<8 x i16> %vus) {
entry:
- %vus.addr = alloca <8 x i16>, align 16
- store <8 x i16> %vus, <8 x i16>* %vus.addr, align 16
- %0 = load <8 x i16>, <8 x i16>* %vus.addr, align 16
- %vecext = extractelement <8 x i16> %0, i32 1
+ %vecext = extractelement <8 x i16> %vus, i32 1
ret i16 %vecext
; CHECK-LABEL: @getus1
; CHECK: mfvsrd 3, 34
@@ -913,13 +760,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 48
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i16 @getus2(<8 x i16> %vus) {
entry:
- %vus.addr = alloca <8 x i16>, align 16
- store <8 x i16> %vus, <8 x i16>* %vus.addr, align 16
- %0 = load <8 x i16>, <8 x i16>* %vus.addr, align 16
- %vecext = extractelement <8 x i16> %0, i32 2
+ %vecext = extractelement <8 x i16> %vus, i32 2
ret i16 %vecext
; CHECK-LABEL: @getus2
; CHECK: mfvsrd 3, 34
@@ -931,13 +775,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 48
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i16 @getus3(<8 x i16> %vus) {
entry:
- %vus.addr = alloca <8 x i16>, align 16
- store <8 x i16> %vus, <8 x i16>* %vus.addr, align 16
- %0 = load <8 x i16>, <8 x i16>* %vus.addr, align 16
- %vecext = extractelement <8 x i16> %0, i32 3
+ %vecext = extractelement <8 x i16> %vus, i32 3
ret i16 %vecext
; CHECK-LABEL: @getus3
; CHECK: mfvsrd 3, 34
@@ -948,13 +789,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 48
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i16 @getus4(<8 x i16> %vus) {
entry:
- %vus.addr = alloca <8 x i16>, align 16
- store <8 x i16> %vus, <8 x i16>* %vus.addr, align 16
- %0 = load <8 x i16>, <8 x i16>* %vus.addr, align 16
- %vecext = extractelement <8 x i16> %0, i32 4
+ %vecext = extractelement <8 x i16> %vus, i32 4
ret i16 %vecext
; CHECK-LABEL: @getus4
; CHECK: mfvsrd 3,
@@ -965,13 +803,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 48
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i16 @getus5(<8 x i16> %vus) {
entry:
- %vus.addr = alloca <8 x i16>, align 16
- store <8 x i16> %vus, <8 x i16>* %vus.addr, align 16
- %0 = load <8 x i16>, <8 x i16>* %vus.addr, align 16
- %vecext = extractelement <8 x i16> %0, i32 5
+ %vecext = extractelement <8 x i16> %vus, i32 5
ret i16 %vecext
; CHECK-LABEL: @getus5
; CHECK: mfvsrd 3,
@@ -983,13 +818,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 48
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i16 @getus6(<8 x i16> %vus) {
entry:
- %vus.addr = alloca <8 x i16>, align 16
- store <8 x i16> %vus, <8 x i16>* %vus.addr, align 16
- %0 = load <8 x i16>, <8 x i16>* %vus.addr, align 16
- %vecext = extractelement <8 x i16> %0, i32 6
+ %vecext = extractelement <8 x i16> %vus, i32 6
ret i16 %vecext
; CHECK-LABEL: @getus6
; CHECK: mfvsrd 3,
@@ -1001,13 +833,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 48
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i16 @getus7(<8 x i16> %vus) {
entry:
- %vus.addr = alloca <8 x i16>, align 16
- store <8 x i16> %vus, <8 x i16>* %vus.addr, align 16
- %0 = load <8 x i16>, <8 x i16>* %vus.addr, align 16
- %vecext = extractelement <8 x i16> %0, i32 7
+ %vecext = extractelement <8 x i16> %vus, i32 7
ret i16 %vecext
; CHECK-LABEL: @getus7
; CHECK: mfvsrd 3,
@@ -1018,16 +847,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 48
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i16 @getvelss(<8 x i16> %vss, i32 signext %i) {
entry:
- %vss.addr = alloca <8 x i16>, align 16
- %i.addr = alloca i32, align 4
- store <8 x i16> %vss, <8 x i16>* %vss.addr, align 16
- store i32 %i, i32* %i.addr, align 4
- %0 = load <8 x i16>, <8 x i16>* %vss.addr, align 16
- %1 = load i32, i32* %i.addr, align 4
- %vecext = extractelement <8 x i16> %0, i32 %1
+ %vecext = extractelement <8 x i16> %vss, i32 %i
ret i16 %vecext
; CHECK-LABEL: @getvelss
; CHECK-DAG: andi. [[ANDI:[0-9]+]], {{[0-9]+}}, 4
@@ -1054,16 +877,10 @@ entry:
; CHECK-DAG-LE: extsh 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i16 @getvelus(<8 x i16> %vus, i32 signext %i) {
entry:
- %vus.addr = alloca <8 x i16>, align 16
- %i.addr = alloca i32, align 4
- store <8 x i16> %vus, <8 x i16>* %vus.addr, align 16
- store i32 %i, i32* %i.addr, align 4
- %0 = load <8 x i16>, <8 x i16>* %vus.addr, align 16
- %1 = load i32, i32* %i.addr, align 4
- %vecext = extractelement <8 x i16> %0, i32 %1
+ %vecext = extractelement <8 x i16> %vus, i32 %i
ret i16 %vecext
; CHECK-LABEL: @getvelus
; CHECK-DAG: andi. [[ANDI:[0-9]+]], {{[0-9]+}}, 4
@@ -1090,13 +907,10 @@ entry:
; CHECK-DAG-LE: clrldi 3, 3, 48
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i32 @getsi0(<4 x i32> %vsi) {
entry:
- %vsi.addr = alloca <4 x i32>, align 16
- store <4 x i32> %vsi, <4 x i32>* %vsi.addr, align 16
- %0 = load <4 x i32>, <4 x i32>* %vsi.addr, align 16
- %vecext = extractelement <4 x i32> %0, i32 0
+ %vecext = extractelement <4 x i32> %vsi, i32 0
ret i32 %vecext
; CHECK-LABEL: @getsi0
; CHECK: xxsldwi [[SHL:[0-9]+]], 34, 34, 3
@@ -1108,13 +922,10 @@ entry:
; CHECK-LE: extsw 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i32 @getsi1(<4 x i32> %vsi) {
entry:
- %vsi.addr = alloca <4 x i32>, align 16
- store <4 x i32> %vsi, <4 x i32>* %vsi.addr, align 16
- %0 = load <4 x i32>, <4 x i32>* %vsi.addr, align 16
- %vecext = extractelement <4 x i32> %0, i32 1
+ %vecext = extractelement <4 x i32> %vsi, i32 1
ret i32 %vecext
; CHECK-LABEL: @getsi1
; CHECK: mfvsrwz 3, 34
@@ -1125,13 +936,10 @@ entry:
; CHECK-LE: extsw 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i32 @getsi2(<4 x i32> %vsi) {
entry:
- %vsi.addr = alloca <4 x i32>, align 16
- store <4 x i32> %vsi, <4 x i32>* %vsi.addr, align 16
- %0 = load <4 x i32>, <4 x i32>* %vsi.addr, align 16
- %vecext = extractelement <4 x i32> %0, i32 2
+ %vecext = extractelement <4 x i32> %vsi, i32 2
ret i32 %vecext
; CHECK-LABEL: @getsi2
; CHECK: xxsldwi [[SHL:[0-9]+]], 34, 34, 1
@@ -1142,13 +950,10 @@ entry:
; CHECK-LE: extsw 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i32 @getsi3(<4 x i32> %vsi) {
entry:
- %vsi.addr = alloca <4 x i32>, align 16
- store <4 x i32> %vsi, <4 x i32>* %vsi.addr, align 16
- %0 = load <4 x i32>, <4 x i32>* %vsi.addr, align 16
- %vecext = extractelement <4 x i32> %0, i32 3
+ %vecext = extractelement <4 x i32> %vsi, i32 3
ret i32 %vecext
; CHECK-LABEL: @getsi3
; CHECK: xxswapd [[SHL:[0-9]+]], 34
@@ -1160,13 +965,10 @@ entry:
; CHECK-LE: extsw 3, 3
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i32 @getui0(<4 x i32> %vui) {
entry:
- %vui.addr = alloca <4 x i32>, align 16
- store <4 x i32> %vui, <4 x i32>* %vui.addr, align 16
- %0 = load <4 x i32>, <4 x i32>* %vui.addr, align 16
- %vecext = extractelement <4 x i32> %0, i32 0
+ %vecext = extractelement <4 x i32> %vui, i32 0
ret i32 %vecext
; CHECK-LABEL: @getui0
; CHECK: xxsldwi [[SHL:[0-9]+]], 34, 34, 3
@@ -1178,13 +980,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 32
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i32 @getui1(<4 x i32> %vui) {
entry:
- %vui.addr = alloca <4 x i32>, align 16
- store <4 x i32> %vui, <4 x i32>* %vui.addr, align 16
- %0 = load <4 x i32>, <4 x i32>* %vui.addr, align 16
- %vecext = extractelement <4 x i32> %0, i32 1
+ %vecext = extractelement <4 x i32> %vui, i32 1
ret i32 %vecext
; CHECK-LABEL: @getui1
; CHECK: mfvsrwz 3, 34
@@ -1195,13 +994,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 32
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i32 @getui2(<4 x i32> %vui) {
entry:
- %vui.addr = alloca <4 x i32>, align 16
- store <4 x i32> %vui, <4 x i32>* %vui.addr, align 16
- %0 = load <4 x i32>, <4 x i32>* %vui.addr, align 16
- %vecext = extractelement <4 x i32> %0, i32 2
+ %vecext = extractelement <4 x i32> %vui, i32 2
ret i32 %vecext
; CHECK-LABEL: @getui2
; CHECK: xxsldwi [[SHL:[0-9]+]], 34, 34, 1
@@ -1212,13 +1008,10 @@ entry:
; CHECK-LE: clrldi 3, 3, 32
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i32 @getui3(<4 x i32> %vui) {
entry:
- %vui.addr = alloca <4 x i32>, align 16
- store <4 x i32> %vui, <4 x i32>* %vui.addr, align 16
- %0 = load <4 x i32>, <4 x i32>* %vui.addr, align 16
- %vecext = extractelement <4 x i32> %0, i32 3
+ %vecext = extractelement <4 x i32> %vui, i32 3
ret i32 %vecext
; CHECK-LABEL: @getui3
; CHECK: xxswapd [[SHL:[0-9]+]], 34
@@ -1230,45 +1023,30 @@ entry:
; CHECK-LE: clrldi 3, 3, 32
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define signext i32 @getvelsi(<4 x i32> %vsi, i32 signext %i) {
entry:
- %vsi.addr = alloca <4 x i32>, align 16
- %i.addr = alloca i32, align 4
- store <4 x i32> %vsi, <4 x i32>* %vsi.addr, align 16
- store i32 %i, i32* %i.addr, align 4
- %0 = load <4 x i32>, <4 x i32>* %vsi.addr, align 16
- %1 = load i32, i32* %i.addr, align 4
- %vecext = extractelement <4 x i32> %0, i32 %1
+ %vecext = extractelement <4 x i32> %vsi, i32 %i
ret i32 %vecext
; CHECK-LABEL: @getvelsi
; CHECK-LE-LABEL: @getvelsi
; FIXME: add check patterns when variable element extraction is implemented
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define zeroext i32 @getvelui(<4 x i32> %vui, i32 signext %i) {
entry:
- %vui.addr = alloca <4 x i32>, align 16
- %i.addr = alloca i32, align 4
- store <4 x i32> %vui, <4 x i32>* %vui.addr, align 16
- store i32 %i, i32* %i.addr, align 4
- %0 = load <4 x i32>, <4 x i32>* %vui.addr, align 16
- %1 = load i32, i32* %i.addr, align 4
- %vecext = extractelement <4 x i32> %0, i32 %1
+ %vecext = extractelement <4 x i32> %vui, i32 %i
ret i32 %vecext
; CHECK-LABEL: @getvelui
; CHECK-LE-LABEL: @getvelui
; FIXME: add check patterns when variable element extraction is implemented
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define i64 @getsl0(<2 x i64> %vsl) {
entry:
- %vsl.addr = alloca <2 x i64>, align 16
- store <2 x i64> %vsl, <2 x i64>* %vsl.addr, align 16
- %0 = load <2 x i64>, <2 x i64>* %vsl.addr, align 16
- %vecext = extractelement <2 x i64> %0, i32 0
+ %vecext = extractelement <2 x i64> %vsl, i32 0
ret i64 %vecext
; CHECK-LABEL: @getsl0
; CHECK: mfvsrd 3, 34
@@ -1277,13 +1055,10 @@ entry:
; CHECK-LE: mfvsrd 3, [[SWP]]
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define i64 @getsl1(<2 x i64> %vsl) {
entry:
- %vsl.addr = alloca <2 x i64>, align 16
- store <2 x i64> %vsl, <2 x i64>* %vsl.addr, align 16
- %0 = load <2 x i64>, <2 x i64>* %vsl.addr, align 16
- %vecext = extractelement <2 x i64> %0, i32 1
+ %vecext = extractelement <2 x i64> %vsl, i32 1
ret i64 %vecext
; CHECK-LABEL: @getsl1
; CHECK: xxswapd [[SWP:[0-9]+]], 34
@@ -1292,13 +1067,10 @@ entry:
; CHECK-LE: mfvsrd 3, 34
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define i64 @getul0(<2 x i64> %vul) {
entry:
- %vul.addr = alloca <2 x i64>, align 16
- store <2 x i64> %vul, <2 x i64>* %vul.addr, align 16
- %0 = load <2 x i64>, <2 x i64>* %vul.addr, align 16
- %vecext = extractelement <2 x i64> %0, i32 0
+ %vecext = extractelement <2 x i64> %vul, i32 0
ret i64 %vecext
; CHECK-LABEL: @getul0
; CHECK: mfvsrd 3, 34
@@ -1307,13 +1079,10 @@ entry:
; CHECK-LE: mfvsrd 3, [[SWP]]
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define i64 @getul1(<2 x i64> %vul) {
entry:
- %vul.addr = alloca <2 x i64>, align 16
- store <2 x i64> %vul, <2 x i64>* %vul.addr, align 16
- %0 = load <2 x i64>, <2 x i64>* %vul.addr, align 16
- %vecext = extractelement <2 x i64> %0, i32 1
+ %vecext = extractelement <2 x i64> %vul, i32 1
ret i64 %vecext
; CHECK-LABEL: @getul1
; CHECK: xxswapd [[SWP:[0-9]+]], 34
@@ -1322,45 +1091,30 @@ entry:
; CHECK-LE: mfvsrd 3, 34
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define i64 @getvelsl(<2 x i64> %vsl, i32 signext %i) {
entry:
- %vsl.addr = alloca <2 x i64>, align 16
- %i.addr = alloca i32, align 4
- store <2 x i64> %vsl, <2 x i64>* %vsl.addr, align 16
- store i32 %i, i32* %i.addr, align 4
- %0 = load <2 x i64>, <2 x i64>* %vsl.addr, align 16
- %1 = load i32, i32* %i.addr, align 4
- %vecext = extractelement <2 x i64> %0, i32 %1
+ %vecext = extractelement <2 x i64> %vsl, i32 %i
ret i64 %vecext
; CHECK-LABEL: @getvelsl
; CHECK-LE-LABEL: @getvelsl
; FIXME: add check patterns when variable element extraction is implemented
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define i64 @getvelul(<2 x i64> %vul, i32 signext %i) {
entry:
- %vul.addr = alloca <2 x i64>, align 16
- %i.addr = alloca i32, align 4
- store <2 x i64> %vul, <2 x i64>* %vul.addr, align 16
- store i32 %i, i32* %i.addr, align 4
- %0 = load <2 x i64>, <2 x i64>* %vul.addr, align 16
- %1 = load i32, i32* %i.addr, align 4
- %vecext = extractelement <2 x i64> %0, i32 %1
+ %vecext = extractelement <2 x i64> %vul, i32 %i
ret i64 %vecext
; CHECK-LABEL: @getvelul
; CHECK-LE-LABEL: @getvelul
; FIXME: add check patterns when variable element extraction is implemented
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define float @getf0(<4 x float> %vf) {
entry:
- %vf.addr = alloca <4 x float>, align 16
- store <4 x float> %vf, <4 x float>* %vf.addr, align 16
- %0 = load <4 x float>, <4 x float>* %vf.addr, align 16
- %vecext = extractelement <4 x float> %0, i32 0
+ %vecext = extractelement <4 x float> %vf, i32 0
ret float %vecext
; CHECK-LABEL: @getf0
; CHECK: xscvspdpn 1, 34
@@ -1369,13 +1123,10 @@ entry:
; CHECK-LE: xscvspdpn 1, [[SHL]]
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define float @getf1(<4 x float> %vf) {
entry:
- %vf.addr = alloca <4 x float>, align 16
- store <4 x float> %vf, <4 x float>* %vf.addr, align 16
- %0 = load <4 x float>, <4 x float>* %vf.addr, align 16
- %vecext = extractelement <4 x float> %0, i32 1
+ %vecext = extractelement <4 x float> %vf, i32 1
ret float %vecext
; CHECK-LABEL: @getf1
; CHECK: xxsldwi [[SHL:[0-9]+]], 34, 34, 1
@@ -1385,13 +1136,10 @@ entry:
; CHECK-LE: xscvspdpn 1, [[SHL]]
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define float @getf2(<4 x float> %vf) {
entry:
- %vf.addr = alloca <4 x float>, align 16
- store <4 x float> %vf, <4 x float>* %vf.addr, align 16
- %0 = load <4 x float>, <4 x float>* %vf.addr, align 16
- %vecext = extractelement <4 x float> %0, i32 2
+ %vecext = extractelement <4 x float> %vf, i32 2
ret float %vecext
; CHECK-LABEL: @getf2
; CHECK: xxswapd [[SHL:[0-9]+]], 34
@@ -1401,13 +1149,10 @@ entry:
; CHECK-LE: xscvspdpn 1, [[SHL]]
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define float @getf3(<4 x float> %vf) {
entry:
- %vf.addr = alloca <4 x float>, align 16
- store <4 x float> %vf, <4 x float>* %vf.addr, align 16
- %0 = load <4 x float>, <4 x float>* %vf.addr, align 16
- %vecext = extractelement <4 x float> %0, i32 3
+ %vecext = extractelement <4 x float> %vf, i32 3
ret float %vecext
; CHECK-LABEL: @getf3
; CHECK: xxsldwi [[SHL:[0-9]+]], 34, 34, 3
@@ -1416,29 +1161,20 @@ entry:
; CHECK-LE: xscvspdpn 1, 34
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define float @getvelf(<4 x float> %vf, i32 signext %i) {
entry:
- %vf.addr = alloca <4 x float>, align 16
- %i.addr = alloca i32, align 4
- store <4 x float> %vf, <4 x float>* %vf.addr, align 16
- store i32 %i, i32* %i.addr, align 4
- %0 = load <4 x float>, <4 x float>* %vf.addr, align 16
- %1 = load i32, i32* %i.addr, align 4
- %vecext = extractelement <4 x float> %0, i32 %1
+ %vecext = extractelement <4 x float> %vf, i32 %i
ret float %vecext
; CHECK-LABEL: @getvelf
; CHECK-LE-LABEL: @getvelf
; FIXME: add check patterns when variable element extraction is implemented
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define double @getd0(<2 x double> %vd) {
entry:
- %vd.addr = alloca <2 x double>, align 16
- store <2 x double> %vd, <2 x double>* %vd.addr, align 16
- %0 = load <2 x double>, <2 x double>* %vd.addr, align 16
- %vecext = extractelement <2 x double> %0, i32 0
+ %vecext = extractelement <2 x double> %vd, i32 0
ret double %vecext
; CHECK-LABEL: @getd0
; CHECK: xxlor 1, 34, 34
@@ -1446,13 +1182,10 @@ entry:
; CHECK-LE: xxswapd 1, 34
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define double @getd1(<2 x double> %vd) {
entry:
- %vd.addr = alloca <2 x double>, align 16
- store <2 x double> %vd, <2 x double>* %vd.addr, align 16
- %0 = load <2 x double>, <2 x double>* %vd.addr, align 16
- %vecext = extractelement <2 x double> %0, i32 1
+ %vecext = extractelement <2 x double> %vd, i32 1
ret double %vecext
; CHECK-LABEL: @getd1
; CHECK: xxswapd 1, 34
@@ -1460,16 +1193,10 @@ entry:
; CHECK-LE: xxlor 1, 34, 34
}
-; Function Attrs: nounwind
+; Function Attrs: norecurse nounwind readnone
define double @getveld(<2 x double> %vd, i32 signext %i) {
entry:
- %vd.addr = alloca <2 x double>, align 16
- %i.addr = alloca i32, align 4
- store <2 x double> %vd, <2 x double>* %vd.addr, align 16
- store i32 %i, i32* %i.addr, align 4
- %0 = load <2 x double>, <2 x double>* %vd.addr, align 16
- %1 = load i32, i32* %i.addr, align 4
- %vecext = extractelement <2 x double> %0, i32 %1
+ %vecext = extractelement <2 x double> %vd, i32 %i
ret double %vecext
; CHECK-LABEL: @getveld
; CHECK-LE-LABEL: @getveld
diff --git a/test/CodeGen/PowerPC/ppc-crbits-onoff.ll b/test/CodeGen/PowerPC/ppc-crbits-onoff.ll
index fbf69d5319be..0e7f8f1bc668 100644
--- a/test/CodeGen/PowerPC/ppc-crbits-onoff.ll
+++ b/test/CodeGen/PowerPC/ppc-crbits-onoff.ll
@@ -1,4 +1,5 @@
; RUN: llc -verify-machineinstrs -mcpu=pwr7 < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-gen-isel=false < %s | FileCheck --check-prefix=CHECK-NO-ISEL %s
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
@@ -12,10 +13,16 @@ entry:
ret i32 %and
; CHECK-LABEL: @crbitsoff
+; CHECK-NO-ISEL-LABEL: @crbitsoff
; CHECK-DAG: cmplwi {{[0-9]+}}, 3, 0
; CHECK-DAG: li [[REG2:[0-9]+]], 1
; CHECK-DAG: cntlzw [[REG3:[0-9]+]],
; CHECK: isel [[REG4:[0-9]+]], 0, [[REG2]]
+; CHECK-NO-ISEL: bc 12, 2, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 4, 5, 0
+; CHECK-NO-ISEL-NEXT: b [[SUCCESSOR:.LBB[0-9]+]]
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 4, 0, 0
; CHECK: and 3, [[REG4]], [[REG3]]
; CHECK: blr
}
@@ -29,11 +36,17 @@ entry:
ret i32 %and
; CHECK-LABEL: @crbitson
+; CHECK-NO-ISEL-LABEL: @crbitson
; CHECK-DAG: cmpwi {{[0-9]+}}, 3, 0
; CHECK-DAG: cmpwi {{[0-9]+}}, 4, 0
; CHECK-DAG: li [[REG2:[0-9]+]], 1
; CHECK-DAG: crorc [[REG3:[0-9]+]],
; CHECK: isel 3, 0, [[REG2]], [[REG3]]
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 0, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
diff --git a/test/CodeGen/PowerPC/ppc-shrink-wrapping.ll b/test/CodeGen/PowerPC/ppc-shrink-wrapping.ll
index 08e39ed05117..10edefb2e21d 100644
--- a/test/CodeGen/PowerPC/ppc-shrink-wrapping.ll
+++ b/test/CodeGen/PowerPC/ppc-shrink-wrapping.ll
@@ -403,7 +403,7 @@ entry:
; CHECK: [[ELSE_LABEL]]
; CHECK-NEXT: slwi 3, 4, 1
; DISABLE: ld 14, -[[STACK_OFFSET]](1) # 8-byte Folded Reload
-; CHECK-NEXT blr
+; CHECK-NEXT: blr
;
define i32 @inlineAsm(i32 %cond, i32 %N) {
entry:
diff --git a/test/CodeGen/PowerPC/ppc64-align-long-double.ll b/test/CodeGen/PowerPC/ppc64-align-long-double.ll
index c3cccd5b2935..d59dc64dcf85 100644
--- a/test/CodeGen/PowerPC/ppc64-align-long-double.ll
+++ b/test/CodeGen/PowerPC/ppc64-align-long-double.ll
@@ -1,6 +1,6 @@
; RUN: llc -verify-machineinstrs -mcpu=pwr7 -O0 -fast-isel=false -mattr=-vsx < %s | FileCheck %s
; RUN: llc -verify-machineinstrs -mcpu=pwr7 -O0 -fast-isel=false -mattr=+vsx < %s | FileCheck -check-prefix=CHECK-VSX %s
-; RUN: llc -verify-machineinstrs -mcpu=pwr9 -O0 -fast-isel=false -mattr=+vsx < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mcpu=pwr9 -O0 -fast-isel=false -mattr=+vsx < %s | FileCheck -check-prefix=CHECK-P9 %s
; Verify internal alignment of long double in a struct. The double
; argument comes in in GPR3; GPR4 is skipped; GPRs 5 and 6 contain
@@ -19,19 +19,44 @@ entry:
ret ppc_fp128 %0
}
+; The additional stores are caused because we forward the value in the
+; store->load->bitcast path to make a store and bitcast of the same
+; value. Since the target does bitcast through memory and we no longer
+; remember the address we need to do the store in a fresh local
+; address.
+
; CHECK-DAG: std 6, 72(1)
; CHECK-DAG: std 5, 64(1)
; CHECK-DAG: std 4, 56(1)
; CHECK-DAG: std 3, 48(1)
-; CHECK: lfd 1, 64(1)
-; CHECK: lfd 2, 72(1)
+
+; CHECK-DAG: std 5, -16(1)
+; CHECK-DAG: std 6, -8(1)
+; CHECK-DAG: lfd 1, -16(1)
+; CHECK-DAG: lfd 2, -8(1)
+
+; FIXMECHECK: lfd 1, 64(1)
+; FIXMECHECK: lfd 2, 72(1)
; CHECK-VSX-DAG: std 6, 72(1)
; CHECK-VSX-DAG: std 5, 64(1)
; CHECK-VSX-DAG: std 4, 56(1)
; CHECK-VSX-DAG: std 3, 48(1)
-; CHECK-VSX: li 3, 16
-; CHECK-VSX: addi 4, 1, 48
-; CHECK-VSX: lxsdx 1, 4, 3
-; CHECK-VSX: li 3, 24
-; CHECK-VSX: lxsdx 2, 4, 3
+; CHECK-VSX-DAG: std 5, -16(1)
+; CHECK-VSX-DAG: std 6, -8(1)
+; CHECK-VSX: addi 3, 1, -16
+; CHECK-VSX: lxsdx 1, 0, 3
+; CHECK-VSX: addi 3, 1, -8
+; CHECK-VSX: lxsdx 2, 0, 3
+
+; FIXME-VSX: addi 4, 1, 48
+; FIXME-VSX: lxsdx 1, 4, 3
+; FIXME-VSX: li 3, 24
+; FIXME-VSX: lxsdx 2, 4, 3
+
+; CHECK-P9: std 6, 72(1)
+; CHECK-P9: std 5, 64(1)
+; CHECK-P9: std 4, 56(1)
+; CHECK-P9: std 3, 48(1)
+; CHECK-P9: mtvsrd 1, 5
+; CHECK-P9: mtvsrd 2, 6
diff --git a/test/CodeGen/PowerPC/ppc64-gep-opt.ll b/test/CodeGen/PowerPC/ppc64-gep-opt.ll
index 1a78310ddf32..d1ae1bcbd88c 100644
--- a/test/CodeGen/PowerPC/ppc64-gep-opt.ll
+++ b/test/CodeGen/PowerPC/ppc64-gep-opt.ll
@@ -84,9 +84,9 @@ exit:
; CHECK-NoAA: add i64 [[TMP:%[a-zA-Z0-9]+]], 528
; CHECK-NoAA: add i64 [[TMP]], 532
; CHECK-NoAA: if.true:
-; CHECK-NoAA: {{%sunk[a-zA-Z0-9]+}} = add i64 [[TMP]], 532
+; CHECK-NoAA: {{%sunk[a-zA-Z0-9]+}} = getelementptr i8, i8* {{.*}}, i64 532
; CHECK-NoAA: exit:
-; CHECK-NoAA: {{%sunk[a-zA-Z0-9]+}} = add i64 [[TMP]], 528
+; CHECK-NoAA: {{%sunk[a-zA-Z0-9]+}} = getelementptr i8, i8* {{.*}}, i64 528
; CHECK-UseAA-LABEL: test_GEP_across_BB(
; CHECK-UseAA: [[PTR0:%[a-zA-Z0-9]+]] = getelementptr
diff --git a/test/CodeGen/PowerPC/ppc64le-aggregates.ll b/test/CodeGen/PowerPC/ppc64le-aggregates.ll
index 25b3e5d89331..6fcbdda4e34f 100644
--- a/test/CodeGen/PowerPC/ppc64le-aggregates.ll
+++ b/test/CodeGen/PowerPC/ppc64le-aggregates.ll
@@ -284,10 +284,7 @@ entry:
; CHECK-DAG: lfs 12, 12({{[0-9]+}})
; CHECK-DAG: lfs 13, 16({{[0-9]+}})
-; CHECK-DAG: lwz [[REG0:[0-9]+]], 0({{[0-9]+}})
-; CHECK-DAG: lwz [[REG1:[0-9]+]], 4({{[0-9]+}})
-; CHECK-DAG: sldi [[REG2:[0-9]+]], [[REG1]], 32
-; CHECK-DAG: or 10, [[REG0]], [[REG2]]
+; CHECK-DAG: ld 10, 0({{[0-9]+}})
; CHECK: bl test2
declare void @test2([8 x float], [5 x float], [2 x float])
diff --git a/test/CodeGen/PowerPC/pr30451.ll b/test/CodeGen/PowerPC/pr30451.ll
index 930553451cf8..9b07df00f9c3 100644
--- a/test/CodeGen/PowerPC/pr30451.ll
+++ b/test/CodeGen/PowerPC/pr30451.ll
@@ -3,11 +3,11 @@ define i8 @atomic_min_i8() {
top:
%0 = alloca i8, align 2
%1 = bitcast i8* %0 to i8*
- call void @llvm.lifetime.start(i64 2, i8* %1)
+ call void @llvm.lifetime.start.p0i8(i64 2, i8* %1)
store i8 -1, i8* %0, align 2
%2 = atomicrmw min i8* %0, i8 0 acq_rel
%3 = load atomic i8, i8* %0 acquire, align 8
- call void @llvm.lifetime.end(i64 2, i8* %1)
+ call void @llvm.lifetime.end.p0i8(i64 2, i8* %1)
ret i8 %3
; CHECK-LABEL: atomic_min_i8
; CHECK: lbarx [[DST:[0-9]+]],
@@ -19,11 +19,11 @@ define i16 @atomic_min_i16() {
top:
%0 = alloca i16, align 2
%1 = bitcast i16* %0 to i8*
- call void @llvm.lifetime.start(i64 2, i8* %1)
+ call void @llvm.lifetime.start.p0i8(i64 2, i8* %1)
store i16 -1, i16* %0, align 2
%2 = atomicrmw min i16* %0, i16 0 acq_rel
%3 = load atomic i16, i16* %0 acquire, align 8
- call void @llvm.lifetime.end(i64 2, i8* %1)
+ call void @llvm.lifetime.end.p0i8(i64 2, i8* %1)
ret i16 %3
; CHECK-LABEL: atomic_min_i16
; CHECK: lharx [[DST:[0-9]+]],
@@ -36,11 +36,11 @@ define i8 @atomic_max_i8() {
top:
%0 = alloca i8, align 2
%1 = bitcast i8* %0 to i8*
- call void @llvm.lifetime.start(i64 2, i8* %1)
+ call void @llvm.lifetime.start.p0i8(i64 2, i8* %1)
store i8 -1, i8* %0, align 2
%2 = atomicrmw max i8* %0, i8 0 acq_rel
%3 = load atomic i8, i8* %0 acquire, align 8
- call void @llvm.lifetime.end(i64 2, i8* %1)
+ call void @llvm.lifetime.end.p0i8(i64 2, i8* %1)
ret i8 %3
; CHECK-LABEL: atomic_max_i8
; CHECK: lbarx [[DST:[0-9]+]],
@@ -52,11 +52,11 @@ define i16 @atomic_max_i16() {
top:
%0 = alloca i16, align 2
%1 = bitcast i16* %0 to i8*
- call void @llvm.lifetime.start(i64 2, i8* %1)
+ call void @llvm.lifetime.start.p0i8(i64 2, i8* %1)
store i16 -1, i16* %0, align 2
%2 = atomicrmw max i16* %0, i16 0 acq_rel
%3 = load atomic i16, i16* %0 acquire, align 8
- call void @llvm.lifetime.end(i64 2, i8* %1)
+ call void @llvm.lifetime.end.p0i8(i64 2, i8* %1)
ret i16 %3
; CHECK-LABEL: atomic_max_i16
; CHECK: lharx [[DST:[0-9]+]],
@@ -65,5 +65,5 @@ define i16 @atomic_max_i16() {
; CHECK-NEXT: ble 0
}
-declare void @llvm.lifetime.start(i64, i8*)
-declare void @llvm.lifetime.end(i64, i8*)
+declare void @llvm.lifetime.start.p0i8(i64, i8*)
+declare void @llvm.lifetime.end.p0i8(i64, i8*)
diff --git a/test/CodeGen/PowerPC/pr32063.ll b/test/CodeGen/PowerPC/pr32063.ll
new file mode 100644
index 000000000000..f031ec83c55e
--- /dev/null
+++ b/test/CodeGen/PowerPC/pr32063.ll
@@ -0,0 +1,16 @@
+; RUN: llc -O2 < %s | FileCheck %s
+target triple = "powerpc64le-linux-gnu"
+
+define void @foo(i32 %v, i16* %p) {
+ %1 = and i32 %v, -65536
+ %2 = tail call i32 @llvm.bswap.i32(i32 %1)
+ %conv = trunc i32 %2 to i16
+ store i16 %conv, i16* %p
+ ret void
+
+; CHECK: srwi
+; CHECK: sthbrx
+; CHECK-NOT: stwbrx
+}
+
+declare i32 @llvm.bswap.i32(i32)
diff --git a/test/CodeGen/PowerPC/pr32140.ll b/test/CodeGen/PowerPC/pr32140.ll
new file mode 100644
index 000000000000..827a90404e4b
--- /dev/null
+++ b/test/CodeGen/PowerPC/pr32140.ll
@@ -0,0 +1,59 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=powerpc64le-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
+; RUN: llc -mtriple=powerpc64-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
+
+@as = common local_unnamed_addr global i16 0, align 2
+@bs = common local_unnamed_addr global i16 0, align 2
+@ai = common local_unnamed_addr global i32 0, align 4
+@bi = common local_unnamed_addr global i32 0, align 4
+
+define void @bswapStorei64Toi32() {
+; CHECK-LABEL: bswapStorei64Toi32:
+; CHECK: # BB#0: # %entry
+; CHECK: lwa 3, 0(3)
+; CHECK-NEXT: rldicl 3, 3, 32, 32
+; CHECK-NEXT: stwbrx 3, 0, 4
+; CHECK-NEXT: blr
+entry:
+ %0 = load i32, i32* @ai, align 4
+ %conv.i = sext i32 %0 to i64
+ %or26.i = tail call i64 @llvm.bswap.i64(i64 %conv.i)
+ %conv = trunc i64 %or26.i to i32
+ store i32 %conv, i32* @bi, align 4
+ ret void
+}
+
+define void @bswapStorei32Toi16() {
+; CHECK-LABEL: bswapStorei32Toi16:
+; CHECK: # BB#0: # %entry
+; CHECK: lha 3, 0(3)
+; CHECK-NEXT: srwi 3, 3, 16
+; CHECK-NEXT: sthbrx 3, 0, 4
+; CHECK-NEXT: blr
+entry:
+ %0 = load i16, i16* @as, align 2
+ %conv.i = sext i16 %0 to i32
+ %or26.i = tail call i32 @llvm.bswap.i32(i32 %conv.i)
+ %conv = trunc i32 %or26.i to i16
+ store i16 %conv, i16* @bs, align 2
+ ret void
+}
+
+define void @bswapStorei64Toi16() {
+; CHECK-LABEL: bswapStorei64Toi16:
+; CHECK: # BB#0: # %entry
+; CHECK: lha 3, 0(3)
+; CHECK-NEXT: rldicl 3, 3, 16, 48
+; CHECK-NEXT: sthbrx 3, 0, 4
+; CHECK-NEXT: blr
+entry:
+ %0 = load i16, i16* @as, align 2
+ %conv.i = sext i16 %0 to i64
+ %or26.i = tail call i64 @llvm.bswap.i64(i64 %conv.i)
+ %conv = trunc i64 %or26.i to i16
+ store i16 %conv, i16* @bs, align 2
+ ret void
+}
+
+declare i32 @llvm.bswap.i32(i32)
+declare i64 @llvm.bswap.i64(i64)
diff --git a/test/CodeGen/PowerPC/pristine-and-livein.mir b/test/CodeGen/PowerPC/pristine-and-livein.mir
new file mode 100644
index 000000000000..6d93bb68c102
--- /dev/null
+++ b/test/CodeGen/PowerPC/pristine-and-livein.mir
@@ -0,0 +1,330 @@
+# RUN: llc -run-pass=post-RA-sched %s -o - | FileCheck %s
+
+# CHECK: callee-saved-register: '[[REG:%x[0-9]+]]'
+# CHECK: callee-saved-register: '{{%x[0-9]+}}'
+# CHECK-NOT: [[REG]] = LI8 0
+# CHECK: STD killed [[REG]],
+--- |
+ ; ModuleID = '<stdin>'
+ source_filename = "bugpoint-output-4d91ae2.bc"
+ target datalayout = "e-m:e-i64:64-n32:64"
+ target triple = "powerpc64le--linux-gnu"
+
+ ; Function Attrs: norecurse nounwind readonly
+ define i64 @adler32_z(i64 %adler, i8* readonly %buf, i64 %len) local_unnamed_addr #0 {
+ entry:
+ %shr = lshr i64 %adler, 16
+ %and = and i64 %shr, 65535
+ %and1 = and i64 %adler, 65535
+ br i1 undef, label %if.then, label %if.end15
+
+ if.then: ; preds = %entry
+ %add5 = add nsw i64 %and1, %and
+ %sub9 = add nsw i64 %add5, 281474976645135
+ %shl = shl i64 %add5, 16
+ %or = or i64 %shl, %and1
+ br label %cleanup
+
+ if.end15: ; preds = %entry
+ br i1 undef, label %while.cond.preheader, label %while.cond30.preheader
+
+ while.cond30.preheader: ; preds = %if.end15
+ br i1 undef, label %while.body33.preheader, label %while.body109.preheader
+
+ while.body33.preheader: ; preds = %while.cond30.preheader
+ br label %while.body33
+
+ while.cond.preheader: ; preds = %if.end15
+ %sub25 = add i64 %and1, -65521
+ %rem = urem i64 %and, 65521
+ %shl27 = shl nuw nsw i64 %rem, 16
+ %or28 = or i64 %shl27, %and1
+ br label %cleanup
+
+ while.body33: ; preds = %do.end, %while.body33.preheader
+ %indvar = phi i64 [ %indvar.next, %do.end ], [ 0, %while.body33.preheader ]
+ %sum2.2385 = phi i64 [ %rem102, %do.end ], [ %and, %while.body33.preheader ]
+ %len.addr.1384 = phi i64 [ %sub34, %do.end ], [ %len, %while.body33.preheader ]
+ %buf.addr.1383 = phi i8* [ %scevgep390, %do.end ], [ %buf, %while.body33.preheader ]
+ %adler.addr.3382 = phi i64 [ %rem101, %do.end ], [ %and1, %while.body33.preheader ]
+ %0 = mul i64 %indvar, 5552
+ %1 = add i64 %0, -13
+ %scevgep2 = getelementptr i8, i8* %buf, i64 %1
+ %sub34 = add i64 %len.addr.1384, -5552
+ call void @llvm.ppc.mtctr.i64(i64 347)
+ br label %do.body
+
+ do.body: ; preds = %do.body, %while.body33
+ %adler.addr.4 = phi i64 [ %adler.addr.3382, %while.body33 ], [ %add49, %do.body ]
+ %sum2.3 = phi i64 [ %sum2.2385, %while.body33 ], [ %add98, %do.body ]
+ %tmp15.phi = phi i8* [ %scevgep2, %while.body33 ], [ %tmp15.inc, %do.body ]
+ %tmp15.inc = getelementptr i8, i8* %tmp15.phi, i64 16
+ %add38 = add i64 %adler.addr.4, %sum2.3
+ %add42 = add i64 %add38, %adler.addr.4
+ %add46 = add i64 %add42, %adler.addr.4
+ %tmp15 = load i8, i8* %tmp15.inc, align 1, !tbaa !1
+ %conv48 = zext i8 %tmp15 to i64
+ %add49 = add i64 %adler.addr.4, %conv48
+ %add50 = add i64 %add46, %add49
+ %add54 = add i64 %add50, %add49
+ %add58 = add i64 %add54, %add49
+ %add62 = add i64 %add58, %add49
+ %add66 = add i64 %add62, %add49
+ %add70 = add i64 %add66, %add49
+ %add74 = add i64 %add70, %add49
+ %add78 = add i64 %add74, %add49
+ %add82 = add i64 %add78, %add49
+ %add86 = add i64 %add82, %add49
+ %add90 = add i64 %add86, %add49
+ %add94 = add i64 %add90, %add49
+ %add98 = add i64 %add94, %add49
+ %2 = call i1 @llvm.ppc.is.decremented.ctr.nonzero()
+ br i1 %2, label %do.body, label %do.end
+
+ do.end: ; preds = %do.body
+ %scevgep390 = getelementptr i8, i8* %buf.addr.1383, i64 5552
+ %rem101 = urem i64 %add49, 65521
+ %rem102 = urem i64 %add98, 65521
+ %cmp31 = icmp ugt i64 %sub34, 5551
+ %indvar.next = add i64 %indvar, 1
+ br i1 %cmp31, label %while.body33, label %while.end103
+
+ while.end103: ; preds = %do.end
+ br i1 undef, label %if.end188, label %while.body109.preheader
+
+ while.body109.preheader: ; preds = %while.end103, %while.cond30.preheader
+ %buf.addr.1.lcssa394400 = phi i8* [ %buf, %while.cond30.preheader ], [ %scevgep390, %while.end103 ]
+ %arrayidx151 = getelementptr inbounds i8, i8* %buf.addr.1.lcssa394400, i64 10
+ %tmp45 = load i8, i8* %arrayidx151, align 1, !tbaa !1
+ %conv152 = zext i8 %tmp45 to i64
+ br label %while.body109
+
+ while.body109: ; preds = %while.body109, %while.body109.preheader
+ %adler.addr.5373 = phi i64 [ %add153, %while.body109 ], [ undef, %while.body109.preheader ]
+ %add153 = add i64 %adler.addr.5373, %conv152
+ br label %while.body109
+
+ if.end188: ; preds = %while.end103
+ %shl189 = shl nuw nsw i64 %rem102, 16
+ %or190 = or i64 %shl189, %rem101
+ br label %cleanup
+
+ cleanup: ; preds = %if.end188, %while.cond.preheader, %if.then
+ %retval.0 = phi i64 [ %or, %if.then ], [ %or28, %while.cond.preheader ], [ %or190, %if.end188 ]
+ ret i64 %retval.0
+ }
+
+ ; Function Attrs: nounwind
+ declare void @llvm.ppc.mtctr.i64(i64) #1
+
+ ; Function Attrs: nounwind
+ declare i1 @llvm.ppc.is.decremented.ctr.nonzero() #1
+
+ ; Function Attrs: nounwind
+ declare void @llvm.stackprotector(i8*, i8**) #1
+
+ attributes #0 = { norecurse nounwind readonly "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="ppc64le" "target-features"="+altivec,+bpermd,+crypto,+direct-move,+extdiv,+power8-vector,+vsx,-power9-vector,-qpx" "unsafe-fp-math"="false" "use-soft-float"="false" }
+ attributes #1 = { nounwind }
+
+ !llvm.ident = !{!0}
+
+ !0 = !{!"clang version 5.0.0 "}
+ !1 = !{!2, !2, i64 0}
+ !2 = !{!"omnipotent char", !3, i64 0}
+ !3 = !{!"Simple C/C++ TBAA"}
+
+...
+---
+name: adler32_z
+alignment: 4
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+ - { reg: '%x3' }
+ - { reg: '%x4' }
+ - { reg: '%x5' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 0
+ offsetAdjustment: 0
+ maxAlignment: 0
+ adjustsStack: false
+ hasCalls: false
+ maxCallFrameSize: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+fixedStack:
+ - { id: 0, type: spill-slot, offset: -16, size: 8, alignment: 16, callee-saved-register: '%x30' }
+ - { id: 1, type: spill-slot, offset: -24, size: 8, alignment: 8, callee-saved-register: '%x29' }
+ - { id: 2, offset: -8, size: 8, alignment: 8, isImmutable: true, isAliased: false }
+body: |
+ bb.0.entry:
+ successors: %bb.1.if.then(0x40000000), %bb.3.if.end15(0x40000000)
+ liveins: %x3, %x4, %x5, %x29, %x30
+
+ %x6 = RLWINM8 %x3, 16, 16, 31
+ %x3 = RLDICL killed %x3, 0, 48
+ BC undef %cr5lt, %bb.3.if.end15
+
+ bb.1.if.then:
+ successors: %bb.2.if.then(0x80000000)
+ liveins: %x3, %x6, %x29, %x30
+
+ %x4 = ADD8 %x3, killed %x6
+
+ bb.2.if.then:
+ liveins: %lr8, %rm, %x3, %x4
+
+ %x4 = RLDICR killed %x4, 16, 47
+ %x3 = OR8 killed %x4, killed %x3
+ BLR8 implicit %lr8, implicit %rm, implicit %x3
+
+ bb.3.if.end15:
+ successors: %bb.6.while.cond.preheader(0x40000000), %bb.4.while.cond30.preheader(0x40000000)
+ liveins: %x3, %x4, %x5, %x6, %x29, %x30
+
+ BC undef %cr5lt, %bb.6.while.cond.preheader
+
+ bb.4.while.cond30.preheader:
+ successors: %bb.7.while.body33.preheader(0x40000000), %bb.5(0x40000000)
+ liveins: %x3, %x4, %x5, %x6, %x29, %x30
+
+ BCn undef %cr5lt, %bb.7.while.body33.preheader
+
+ bb.5:
+ successors: %bb.12.while.body109.preheader(0x80000000)
+ liveins: %x4, %x29, %x30
+
+ %x7 = OR8 %x4, killed %x4
+ B %bb.12.while.body109.preheader
+
+ bb.6.while.cond.preheader:
+ successors: %bb.2.if.then(0x80000000)
+ liveins: %x3, %x6, %x29, %x30
+
+ %x4 = LIS8 15
+ %x4 = ORI8 killed %x4, 225
+ %x4 = RLDICR killed %x4, 32, 31
+ %x4 = ORIS8 killed %x4, 3375
+ %x4 = ORI8 killed %x4, 50637
+ %x4 = MULHDU %x6, killed %x4
+ %x5 = SUBF8 %x4, %x6
+ %x5 = RLDICL killed %x5, 63, 1
+ %x4 = ADD8 killed %x5, killed %x4
+ %x5 = LI8 0
+ %x4 = RLDICL killed %x4, 49, 15
+ %x5 = ORI8 killed %x5, 65521
+ %x4 = MULLD killed %x4, killed %x5
+ %x4 = SUBF8 killed %x4, killed %x6
+ B %bb.2.if.then
+
+ bb.7.while.body33.preheader:
+ successors: %bb.8.while.body33(0x80000000)
+ liveins: %x3, %x4, %x5, %x6, %x29, %x30
+
+ STD killed %x29, -24, %x1 :: (store 8 into %fixed-stack.1)
+ STD killed %x30, -16, %x1 :: (store 8 into %fixed-stack.0, align 16)
+ %x7 = LIS8 15
+ %x7 = ORI8 killed %x7, 225
+ %x7 = RLDICR killed %x7, 32, 31
+ %x8 = LI8 0
+ %x7 = ORIS8 killed %x7, 3375
+ %x9 = LI8 347
+ %x10 = ORI8 killed %x7, 50637
+ %x11 = ORI8 %x8, 65521
+ %x7 = OR8 %x4, %x4
+
+ bb.8.while.body33:
+ successors: %bb.9.do.body(0x80000000)
+ liveins: %x3, %x4, %x5, %x6, %x7, %x8, %x9, %x10, %x11
+
+ %x12 = MULLI8 %x8, 5552
+ %x12 = ADD8 %x4, killed %x12
+ %x12 = ADDI8 killed %x12, -13
+ %x5 = ADDI8 killed %x5, -5552
+ MTCTR8loop %x9, implicit-def dead %ctr8
+
+ bb.9.do.body:
+ successors: %bb.9.do.body(0x7c000000), %bb.10.do.end(0x04000000)
+ liveins: %x3, %x4, %x5, %x6, %x7, %x8, %x9, %x10, %x11, %x12
+
+ %x0, %x12 = LBZU8 16, killed %x12 :: (load 1 from %ir.tmp15.inc, !tbaa !1)
+ %x6 = ADD8 %x3, killed %x6
+ %x6 = ADD8 killed %x6, %x3
+ %x6 = ADD8 killed %x6, %x3
+ %x3 = ADD8 killed %x3, killed %x0
+ %x6 = ADD8 killed %x6, %x3
+ %x6 = ADD8 killed %x6, %x3
+ %x6 = ADD8 killed %x6, %x3
+ %x6 = ADD8 killed %x6, %x3
+ %x6 = ADD8 killed %x6, %x3
+ %x6 = ADD8 killed %x6, %x3
+ %x6 = ADD8 killed %x6, %x3
+ %x6 = ADD8 killed %x6, %x3
+ %x6 = ADD8 killed %x6, %x3
+ %x6 = ADD8 killed %x6, %x3
+ %x6 = ADD8 killed %x6, %x3
+ %x6 = ADD8 killed %x6, %x3
+ %x6 = ADD8 killed %x6, %x3
+ BDNZ8 %bb.9.do.body, implicit-def %ctr8, implicit %ctr8
+
+ bb.10.do.end:
+ successors: %bb.8.while.body33(0x7c000000), %bb.11.while.end103(0x04000000)
+ liveins: %x3, %x4, %x5, %x6, %x7, %x8, %x9, %x10, %x11
+
+ %x12 = MULHDU %x3, %x10
+ %x0 = MULHDU %x6, %x10
+ %x30 = SUBF8 %x12, %x3
+ %x29 = SUBF8 %x0, %x6
+ %x30 = RLDICL killed %x30, 63, 1
+ %x29 = RLDICL killed %x29, 63, 1
+ %x12 = ADD8 killed %x30, killed %x12
+ %x0 = ADD8 killed %x29, killed %x0
+ %cr0 = CMPLDI %x5, 5551
+ %x12 = RLDICL killed %x12, 49, 15
+ %x0 = RLDICL killed %x0, 49, 15
+ %x12 = MULLD killed %x12, %x11
+ %x0 = MULLD killed %x0, %x11
+ %x7 = ADDI8 killed %x7, 5552
+ %x3 = SUBF8 killed %x12, killed %x3
+ %x6 = SUBF8 killed %x0, killed %x6
+ %x8 = ADDI8 killed %x8, 1
+ BCC 44, killed %cr0, %bb.8.while.body33
+
+ bb.11.while.end103:
+ successors: %bb.14.if.end188(0x40000000), %bb.12.while.body109.preheader(0x40000000)
+ liveins: %x3, %x6, %x7
+
+ %x30 = LD -16, %x1 :: (load 8 from %fixed-stack.0, align 16)
+ %x29 = LD -24, %x1 :: (load 8 from %fixed-stack.1)
+ BC undef %cr5lt, %bb.14.if.end188
+
+ bb.12.while.body109.preheader:
+ successors: %bb.13.while.body109(0x80000000)
+ liveins: %x7, %x29, %x30
+
+ %x3 = LBZ8 10, killed %x7 :: (load 1 from %ir.arrayidx151, !tbaa !1)
+ %x4 = IMPLICIT_DEF
+
+ bb.13.while.body109:
+ successors: %bb.13.while.body109(0x80000000)
+ liveins: %x3, %x4, %x29, %x30
+
+ %x4 = ADD8 killed %x4, %x3
+ B %bb.13.while.body109
+
+ bb.14.if.end188:
+ liveins: %x3, %x6, %x29, %x30
+
+ %x4 = RLDICR killed %x6, 16, 47
+ %x3 = OR8 killed %x4, killed %x3
+ BLR8 implicit %lr8, implicit %rm, implicit %x3
+
+...
diff --git a/test/CodeGen/PowerPC/select-i1-vs-i1.ll b/test/CodeGen/PowerPC/select-i1-vs-i1.ll
index a8f1ef1dd284..b7beb8165fdf 100644
--- a/test/CodeGen/PowerPC/select-i1-vs-i1.ll
+++ b/test/CodeGen/PowerPC/select-i1-vs-i1.ll
@@ -1,4 +1,5 @@
; RUN: llc -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -ppc-gen-isel=false < %s | FileCheck --check-prefix=CHECK-NO-ISEL %s
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
@@ -15,10 +16,17 @@ entry:
ret i32 %cond
; CHECK-LABEL: @testi32slt
+; CHECK-NO-ISEL-LABEL: @testi32slt
; CHECK-DAG: cmpw {{[0-9]+}}, 5, 6
; CHECK-DAG: cmpw {{[0-9]+}}, 3, 4
; CHECK: crandc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: isel 3, 7, 8, [[REG1]]
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 8, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 7, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
@@ -30,11 +38,18 @@ entry:
 %cond = select i1 %cmp3, i32 %a1, i32 %a2
 ret i32 %cond
; CHECK-LABEL: @testi32ult
+; CHECK-NO-ISEL-LABEL: @testi32ult
; CHECK-DAG: cmpw {{[0-9]+}}, 5, 6
; CHECK-DAG: cmpw {{[0-9]+}}, 3, 4
; CHECK: crandc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: isel 3, 7, 8, [[REG1]]
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 8, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 7, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
@@ -47,10 +61,17 @@ entry:
ret i32 %cond
; CHECK-LABEL: @testi32sle
+; CHECK-NO-ISEL-LABEL: @testi32sle
; CHECK-DAG: cmpw {{[0-9]+}}, 5, 6
; CHECK-DAG: cmpw {{[0-9]+}}, 3, 4
; CHECK: crorc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: isel 3, 7, 8, [[REG1]]
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 8, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 7, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
@@ -63,10 +84,17 @@ entry:
ret i32 %cond
; CHECK-LABEL: @testi32ule
+; CHECK-NO-ISEL-LABEL: @testi32ule
; CHECK-DAG: cmpw {{[0-9]+}}, 5, 6
; CHECK-DAG: cmpw {{[0-9]+}}, 3, 4
; CHECK: crorc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: isel 3, 7, 8, [[REG1]]
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 8, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 7, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
@@ -79,10 +107,17 @@ entry:
ret i32 %cond
; CHECK-LABEL: @testi32eq
+; CHECK-NO-ISEL-LABEL: @testi32eq
; CHECK-DAG: cmpw {{[0-9]+}}, 5, 6
; CHECK-DAG: cmpw {{[0-9]+}}, 3, 4
; CHECK: creqv [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: isel 3, 7, 8, [[REG1]]
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 8, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 7, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
@@ -95,10 +130,17 @@ entry:
ret i32 %cond
; CHECK-LABEL: @testi32sge
+; CHECK-NO-ISEL-LABEL: @testi32sge
; CHECK-DAG: cmpw {{[0-9]+}}, 5, 6
; CHECK-DAG: cmpw {{[0-9]+}}, 3, 4
; CHECK: crorc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: isel 3, 7, 8, [[REG1]]
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 8, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 7, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
@@ -111,10 +153,17 @@ entry:
ret i32 %cond
; CHECK-LABEL: @testi32uge
+; CHECK-NO-ISEL-LABEL: @testi32uge
; CHECK-DAG: cmpw {{[0-9]+}}, 5, 6
; CHECK-DAG: cmpw {{[0-9]+}}, 3, 4
; CHECK: crorc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: isel 3, 7, 8, [[REG1]]
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 8, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 7, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
@@ -127,10 +176,17 @@ entry:
ret i32 %cond
; CHECK-LABEL: @testi32sgt
+; CHECK-NO-ISEL-LABEL: @testi32sgt
; CHECK-DAG: cmpw {{[0-9]+}}, 5, 6
; CHECK-DAG: cmpw {{[0-9]+}}, 3, 4
; CHECK: crandc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: isel 3, 7, 8, [[REG1]]
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 8, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 7, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
@@ -143,10 +199,17 @@ entry:
ret i32 %cond
; CHECK-LABEL: @testi32ugt
+; CHECK-NO-ISEL-LABEL: @testi32ugt
; CHECK-DAG: cmpw {{[0-9]+}}, 5, 6
; CHECK-DAG: cmpw {{[0-9]+}}, 3, 4
; CHECK: crandc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: isel 3, 7, 8, [[REG1]]
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 8, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 7, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
@@ -159,10 +222,17 @@ entry:
ret i32 %cond
; CHECK-LABEL: @testi32ne
+; CHECK-NO-ISEL-LABEL: @testi32ne
; CHECK-DAG: cmpw {{[0-9]+}}, 5, 6
; CHECK-DAG: cmpw {{[0-9]+}}, 3, 4
; CHECK: crxor [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: isel 3, 7, 8, [[REG1]]
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 8, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 7, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
@@ -175,10 +245,17 @@ entry:
ret i64 %cond
; CHECK-LABEL: @testi64slt
+; CHECK-NO-ISEL-LABEL: @testi64slt
; CHECK-DAG: cmpd {{([0-9]+, )?}}5, 6
; CHECK-DAG: cmpd {{([0-9]+, )?}}3, 4
; CHECK: crandc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: isel 3, 7, 8, [[REG1]]
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 8, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 7, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
@@ -191,10 +268,17 @@ entry:
ret i64 %cond
; CHECK-LABEL: @testi64ult
+; CHECK-NO-ISEL-LABEL: @testi64ult
; CHECK-DAG: cmpd {{([0-9]+, )?}}5, 6
; CHECK-DAG: cmpd {{([0-9]+, )?}}3, 4
; CHECK: crandc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: isel 3, 7, 8, [[REG1]]
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 8, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 7, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
@@ -207,10 +291,17 @@ entry:
ret i64 %cond
; CHECK-LABEL: @testi64sle
+; CHECK-NO-ISEL-LABEL: @testi64sle
; CHECK-DAG: cmpd {{([0-9]+, )?}}5, 6
; CHECK-DAG: cmpd {{([0-9]+, )?}}3, 4
; CHECK: crorc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: isel 3, 7, 8, [[REG1]]
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 8, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 7, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
@@ -223,10 +314,17 @@ entry:
ret i64 %cond
; CHECK-LABEL: @testi64ule
+; CHECK-NO-ISEL-LABEL: @testi64ule
; CHECK-DAG: cmpd {{([0-9]+, )?}}5, 6
; CHECK-DAG: cmpd {{([0-9]+, )?}}3, 4
; CHECK: crorc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: isel 3, 7, 8, [[REG1]]
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 8, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 7, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
@@ -239,10 +337,17 @@ entry:
ret i64 %cond
; CHECK-LABEL: @testi64eq
+; CHECK-NO-ISEL-LABEL: @testi64eq
; CHECK-DAG: cmpd {{([0-9]+, )?}}5, 6
; CHECK-DAG: cmpd {{([0-9]+, )?}}3, 4
; CHECK: creqv [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: isel 3, 7, 8, [[REG1]]
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 8, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 7, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
@@ -255,10 +360,17 @@ entry:
ret i64 %cond
; CHECK-LABEL: @testi64sge
+; CHECK-NO-ISEL-LABEL: @testi64sge
; CHECK-DAG: cmpd {{([0-9]+, )?}}5, 6
; CHECK-DAG: cmpd {{([0-9]+, )?}}3, 4
; CHECK: crorc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: isel 3, 7, 8, [[REG1]]
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 8, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 7, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
@@ -271,10 +383,17 @@ entry:
ret i64 %cond
; CHECK-LABEL: @testi64uge
+; CHECK-NO-ISEL-LABEL: @testi64uge
; CHECK-DAG: cmpd {{([0-9]+, )?}}5, 6
; CHECK-DAG: cmpd {{([0-9]+, )?}}3, 4
; CHECK: crorc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: isel 3, 7, 8, [[REG1]]
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 8, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 7, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
@@ -287,10 +406,17 @@ entry:
ret i64 %cond
; CHECK-LABEL: @testi64sgt
+; CHECK-NO-ISEL-LABEL: @testi64sgt
; CHECK-DAG: cmpd {{([0-9]+, )?}}5, 6
; CHECK-DAG: cmpd {{([0-9]+, )?}}3, 4
; CHECK: crandc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: isel 3, 7, 8, [[REG1]]
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 8, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 7, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
@@ -303,10 +429,17 @@ entry:
ret i64 %cond
; CHECK-LABEL: @testi64ugt
+; CHECK-NO-ISEL-LABEL: @testi64ugt
; CHECK-DAG: cmpd {{([0-9]+, )?}}5, 6
; CHECK-DAG: cmpd {{([0-9]+, )?}}3, 4
; CHECK: crandc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: isel 3, 7, 8, [[REG1]]
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 8, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 7, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
@@ -319,10 +452,17 @@ entry:
ret i64 %cond
; CHECK-LABEL: @testi64ne
+; CHECK-NO-ISEL-LABEL: @testi64ne
; CHECK-DAG: cmpd {{([0-9]+, )?}}5, 6
; CHECK-DAG: cmpd {{([0-9]+, )?}}3, 4
; CHECK: crxor [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: isel 3, 7, 8, [[REG1]]
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 3, 8, 0
+; CHECK-NO-ISEL-NEXT: blr
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 3, 7, 0
+; CHECK-NO-ISEL-NEXT: blr
; CHECK: blr
}
@@ -719,7 +859,7 @@ entry:
; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
; CHECK: crandc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: bclr 12, [[REG1]], 0
-; CHECK: vor 2, 3, 3
+; CHECK: vmr 2, 3
; CHECK: blr
}
@@ -736,7 +876,7 @@ entry:
; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
; CHECK: crandc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: bclr 12, [[REG1]], 0
-; CHECK: vor 2, 3, 3
+; CHECK: vmr 2, 3
; CHECK: blr
}
@@ -753,7 +893,7 @@ entry:
; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
; CHECK: crorc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: bclr 12, [[REG1]], 0
-; CHECK: vor 2, 3, 3
+; CHECK: vmr 2, 3
; CHECK: blr
}
@@ -770,7 +910,7 @@ entry:
; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
; CHECK: crorc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: bclr 12, [[REG1]], 0
-; CHECK: vor 2, 3, 3
+; CHECK: vmr 2, 3
; CHECK: blr
}
@@ -787,9 +927,9 @@ entry:
; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
; CHECK: crxor [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: bc 12, [[REG1]], .LBB[[BB1:[0-9_]+]]
-; CHECK: vor 3, 2, 2
+; CHECK: vmr 3, 2
; CHECK: .LBB[[BB1]]
-; CHECK: vor 2, 3, 3
+; CHECK: vmr 2, 3
; CHECK: blr
}
@@ -806,7 +946,7 @@ entry:
; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
; CHECK: crorc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: bclr 12, [[REG1]], 0
-; CHECK: vor 2, 3, 3
+; CHECK: vmr 2, 3
; CHECK: blr
}
@@ -823,7 +963,7 @@ entry:
; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
; CHECK: crorc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: bclr 12, [[REG1]], 0
-; CHECK: vor 2, 3, 3
+; CHECK: vmr 2, 3
; CHECK: blr
}
@@ -840,7 +980,7 @@ entry:
; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
; CHECK: crandc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: bclr 12, [[REG1]], 0
-; CHECK: vor 2, 3, 3
+; CHECK: vmr 2, 3
; CHECK: blr
}
@@ -857,7 +997,7 @@ entry:
; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
; CHECK: crandc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: bclr 12, [[REG1]], 0
-; CHECK: vor 2, 3, 3
+; CHECK: vmr 2, 3
; CHECK: blr
}
@@ -874,7 +1014,7 @@ entry:
; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
; CHECK: crxor [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: bclr 12, [[REG1]], 0
-; CHECK: vor 2, 3, 3
+; CHECK: vmr 2, 3
; CHECK: blr
}
@@ -922,7 +1062,7 @@ entry:
; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
; CHECK: crandc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: bclr 12, [[REG1]], 0
-; CHECK: vor 2, 3, 3
+; CHECK: vmr 2, 3
; CHECK: blr
}
@@ -939,7 +1079,7 @@ entry:
; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
; CHECK: crandc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: bclr 12, [[REG1]], 0
-; CHECK: vor 2, 3, 3
+; CHECK: vmr 2, 3
; CHECK: blr
}
@@ -956,7 +1096,7 @@ entry:
; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
; CHECK: crorc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: bclr 12, [[REG1]], 0
-; CHECK: vor 2, 3, 3
+; CHECK: vmr 2, 3
; CHECK: blr
}
@@ -973,7 +1113,7 @@ entry:
; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
; CHECK: crorc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: bclr 12, [[REG1]], 0
-; CHECK: vor 2, 3, 3
+; CHECK: vmr 2, 3
; CHECK: blr
}
@@ -990,9 +1130,9 @@ entry:
; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
; CHECK: crxor [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: bc 12, [[REG1]], .LBB[[BB55:[0-9_]+]]
-; CHECK: vor 3, 2, 2
+; CHECK: vmr 3, 2
; CHECK: .LBB[[BB55]]
-; CHECK: vor 2, 3, 3
+; CHECK: vmr 2, 3
; CHECK: blr
}
@@ -1009,7 +1149,7 @@ entry:
; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
; CHECK: crorc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: bclr 12, [[REG1]], 0
-; CHECK: vor 2, 3, 3
+; CHECK: vmr 2, 3
; CHECK: blr
}
@@ -1026,7 +1166,7 @@ entry:
; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
; CHECK: crorc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: bclr 12, [[REG1]], 0
-; CHECK: vor 2, 3, 3
+; CHECK: vmr 2, 3
; CHECK: blr
}
@@ -1043,7 +1183,7 @@ entry:
; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
; CHECK: crandc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: bclr 12, [[REG1]], 0
-; CHECK: vor 2, 3, 3
+; CHECK: vmr 2, 3
; CHECK: blr
}
@@ -1060,7 +1200,7 @@ entry:
; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
; CHECK: crandc [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: bclr 12, [[REG1]], 0
-; CHECK: vor 2, 3, 3
+; CHECK: vmr 2, 3
; CHECK: blr
}
@@ -1077,7 +1217,7 @@ entry:
; CHECK-DAG: fcmpu {{[0-9]+}}, 1, 2
; CHECK: crxor [[REG1:[0-9]+]], {{[0-9]+}}, {{[0-9]+}}
; CHECK: bclr 12, [[REG1]], 0
-; CHECK: vor 2, 3, 3
+; CHECK: vmr 2, 3
; CHECK: blr
}
diff --git a/test/CodeGen/PowerPC/select_const.ll b/test/CodeGen/PowerPC/select_const.ll
new file mode 100644
index 000000000000..29548123be88
--- /dev/null
+++ b/test/CodeGen/PowerPC/select_const.ll
@@ -0,0 +1,789 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=powerpc64le-unknown-unknown -verify-machineinstrs -mattr=+isel | FileCheck %s --check-prefix=ALL --check-prefix=ISEL
+; RUN: llc < %s -mtriple=powerpc64le-unknown-unknown -verify-machineinstrs -mattr=-isel | FileCheck %s --check-prefix=ALL --check-prefix=NO_ISEL
+
+; Select of constants: control flow / conditional moves can always be replaced by logic+math (but may not be worth it?).
+; Test the zeroext/signext variants of each pattern to see if that makes a difference.
+
+; select Cond, 0, 1 --> zext (!Cond)
+
+define i32 @select_0_or_1(i1 %cond) {
+; ALL-LABEL: select_0_or_1:
+; ALL: # BB#0:
+; ALL-NEXT: not 3, 3
+; ALL-NEXT: clrldi 3, 3, 63
+; ALL-NEXT: blr
+ %sel = select i1 %cond, i32 0, i32 1
+ ret i32 %sel
+}
+
+define i32 @select_0_or_1_zeroext(i1 zeroext %cond) {
+; ALL-LABEL: select_0_or_1_zeroext:
+; ALL: # BB#0:
+; ALL-NEXT: xori 3, 3, 1
+; ALL-NEXT: blr
+ %sel = select i1 %cond, i32 0, i32 1
+ ret i32 %sel
+}
+
+define i32 @select_0_or_1_signext(i1 signext %cond) {
+; ALL-LABEL: select_0_or_1_signext:
+; ALL: # BB#0:
+; ALL-NEXT: not 3, 3
+; ALL-NEXT: clrldi 3, 3, 63
+; ALL-NEXT: blr
+ %sel = select i1 %cond, i32 0, i32 1
+ ret i32 %sel
+}
+
+; select Cond, 1, 0 --> zext (Cond)
+
+define i32 @select_1_or_0(i1 %cond) {
+; ALL-LABEL: select_1_or_0:
+; ALL: # BB#0:
+; ALL-NEXT: clrldi 3, 3, 63
+; ALL-NEXT: blr
+ %sel = select i1 %cond, i32 1, i32 0
+ ret i32 %sel
+}
+
+define i32 @select_1_or_0_zeroext(i1 zeroext %cond) {
+; ALL-LABEL: select_1_or_0_zeroext:
+; ALL: # BB#0:
+; ALL-NEXT: blr
+ %sel = select i1 %cond, i32 1, i32 0
+ ret i32 %sel
+}
+
+define i32 @select_1_or_0_signext(i1 signext %cond) {
+; ALL-LABEL: select_1_or_0_signext:
+; ALL: # BB#0:
+; ALL-NEXT: clrldi 3, 3, 63
+; ALL-NEXT: blr
+ %sel = select i1 %cond, i32 1, i32 0
+ ret i32 %sel
+}
+
+; select Cond, 0, -1 --> sext (!Cond)
+
+define i32 @select_0_or_neg1(i1 %cond) {
+; ISEL-LABEL: select_0_or_neg1:
+; ISEL: # BB#0:
+; ISEL-NEXT: li 4, 0
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: oris 3, 4, 65535
+; ISEL-NEXT: ori 3, 3, 65535
+; ISEL-NEXT: isel 3, 0, 3, 1
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: select_0_or_neg1:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: li 4, 0
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: oris 3, 4, 65535
+; NO_ISEL-NEXT: ori 3, 3, 65535
+; NO_ISEL-NEXT: bc 12, 1, .LBB6_1
+; NO_ISEL-NEXT: blr
+; NO_ISEL-NEXT: .LBB6_1:
+; NO_ISEL-NEXT: addi 3, 0, 0
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, i32 0, i32 -1
+ ret i32 %sel
+}
+
+define i32 @select_0_or_neg1_zeroext(i1 zeroext %cond) {
+; ISEL-LABEL: select_0_or_neg1_zeroext:
+; ISEL: # BB#0:
+; ISEL-NEXT: li 4, 0
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: oris 3, 4, 65535
+; ISEL-NEXT: ori 3, 3, 65535
+; ISEL-NEXT: isel 3, 0, 3, 1
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: select_0_or_neg1_zeroext:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: li 4, 0
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: oris 3, 4, 65535
+; NO_ISEL-NEXT: ori 3, 3, 65535
+; NO_ISEL-NEXT: bc 12, 1, .LBB7_1
+; NO_ISEL-NEXT: blr
+; NO_ISEL-NEXT: .LBB7_1:
+; NO_ISEL-NEXT: addi 3, 0, 0
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, i32 0, i32 -1
+ ret i32 %sel
+}
+
+define i32 @select_0_or_neg1_signext(i1 signext %cond) {
+; ISEL-LABEL: select_0_or_neg1_signext:
+; ISEL: # BB#0:
+; ISEL-NEXT: li 4, 0
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: oris 3, 4, 65535
+; ISEL-NEXT: ori 3, 3, 65535
+; ISEL-NEXT: isel 3, 0, 3, 1
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: select_0_or_neg1_signext:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: li 4, 0
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: oris 3, 4, 65535
+; NO_ISEL-NEXT: ori 3, 3, 65535
+; NO_ISEL-NEXT: bc 12, 1, .LBB8_1
+; NO_ISEL-NEXT: blr
+; NO_ISEL-NEXT: .LBB8_1:
+; NO_ISEL-NEXT: addi 3, 0, 0
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, i32 0, i32 -1
+ ret i32 %sel
+}
+
+; select Cond, -1, 0 --> sext (Cond)
+
+define i32 @select_neg1_or_0(i1 %cond) {
+; ISEL-LABEL: select_neg1_or_0:
+; ISEL: # BB#0:
+; ISEL-NEXT: li 4, 0
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: oris 3, 4, 65535
+; ISEL-NEXT: ori 3, 3, 65535
+; ISEL-NEXT: isel 3, 3, 4, 1
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: select_neg1_or_0:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: li 4, 0
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: oris 3, 4, 65535
+; NO_ISEL-NEXT: ori 3, 3, 65535
+; NO_ISEL-NEXT: bclr 12, 1, 0
+; NO_ISEL-NEXT: # BB#1:
+; NO_ISEL-NEXT: ori 3, 4, 0
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, i32 -1, i32 0
+ ret i32 %sel
+}
+
+define i32 @select_neg1_or_0_zeroext(i1 zeroext %cond) {
+; ISEL-LABEL: select_neg1_or_0_zeroext:
+; ISEL: # BB#0:
+; ISEL-NEXT: li 4, 0
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: oris 3, 4, 65535
+; ISEL-NEXT: ori 3, 3, 65535
+; ISEL-NEXT: isel 3, 3, 4, 1
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: select_neg1_or_0_zeroext:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: li 4, 0
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: oris 3, 4, 65535
+; NO_ISEL-NEXT: ori 3, 3, 65535
+; NO_ISEL-NEXT: bclr 12, 1, 0
+; NO_ISEL-NEXT: # BB#1:
+; NO_ISEL-NEXT: ori 3, 4, 0
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, i32 -1, i32 0
+ ret i32 %sel
+}
+
+define i32 @select_neg1_or_0_signext(i1 signext %cond) {
+; ISEL-LABEL: select_neg1_or_0_signext:
+; ISEL: # BB#0:
+; ISEL-NEXT: li 4, 0
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: oris 3, 4, 65535
+; ISEL-NEXT: ori 3, 3, 65535
+; ISEL-NEXT: isel 3, 3, 4, 1
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: select_neg1_or_0_signext:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: li 4, 0
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: oris 3, 4, 65535
+; NO_ISEL-NEXT: ori 3, 3, 65535
+; NO_ISEL-NEXT: bclr 12, 1, 0
+; NO_ISEL-NEXT: # BB#1:
+; NO_ISEL-NEXT: ori 3, 4, 0
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, i32 -1, i32 0
+ ret i32 %sel
+}
+
+; select Cond, C+1, C --> add (zext Cond), C
+
+define i32 @select_Cplus1_C(i1 %cond) {
+; ALL-LABEL: select_Cplus1_C:
+; ALL: # BB#0:
+; ALL-NEXT: clrldi 3, 3, 63
+; ALL-NEXT: addi 3, 3, 41
+; ALL-NEXT: blr
+ %sel = select i1 %cond, i32 42, i32 41
+ ret i32 %sel
+}
+
+define i32 @select_Cplus1_C_zeroext(i1 zeroext %cond) {
+; ALL-LABEL: select_Cplus1_C_zeroext:
+; ALL: # BB#0:
+; ALL-NEXT: addi 3, 3, 41
+; ALL-NEXT: blr
+ %sel = select i1 %cond, i32 42, i32 41
+ ret i32 %sel
+}
+
+define i32 @select_Cplus1_C_signext(i1 signext %cond) {
+; ALL-LABEL: select_Cplus1_C_signext:
+; ALL: # BB#0:
+; ALL-NEXT: subfic 3, 3, 41
+; ALL-NEXT: blr
+ %sel = select i1 %cond, i32 42, i32 41
+ ret i32 %sel
+}
+
+; select Cond, C, C+1 --> add (sext Cond), C
+
+define i32 @select_C_Cplus1(i1 %cond) {
+; ALL-LABEL: select_C_Cplus1:
+; ALL: # BB#0:
+; ALL-NEXT: clrldi 3, 3, 63
+; ALL-NEXT: subfic 3, 3, 42
+; ALL-NEXT: blr
+ %sel = select i1 %cond, i32 41, i32 42
+ ret i32 %sel
+}
+
+define i32 @select_C_Cplus1_zeroext(i1 zeroext %cond) {
+; ALL-LABEL: select_C_Cplus1_zeroext:
+; ALL: # BB#0:
+; ALL-NEXT: subfic 3, 3, 42
+; ALL-NEXT: blr
+ %sel = select i1 %cond, i32 41, i32 42
+ ret i32 %sel
+}
+
+define i32 @select_C_Cplus1_signext(i1 signext %cond) {
+; ALL-LABEL: select_C_Cplus1_signext:
+; ALL: # BB#0:
+; ALL-NEXT: addi 3, 3, 42
+; ALL-NEXT: blr
+ %sel = select i1 %cond, i32 41, i32 42
+ ret i32 %sel
+}
+
+; In general, select of 2 constants could be:
+; select Cond, C1, C2 --> add (mul (zext Cond), C1-C2), C2 --> add (and (sext Cond), C1-C2), C2
+
+define i32 @select_C1_C2(i1 %cond) {
+; ISEL-LABEL: select_C1_C2:
+; ISEL: # BB#0:
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: li 4, 421
+; ISEL-NEXT: li 3, 42
+; ISEL-NEXT: isel 3, 4, 3, 1
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: select_C1_C2:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: li 4, 421
+; NO_ISEL-NEXT: li 3, 42
+; NO_ISEL-NEXT: bc 12, 1, .LBB18_1
+; NO_ISEL-NEXT: blr
+; NO_ISEL-NEXT: .LBB18_1:
+; NO_ISEL-NEXT: addi 3, 4, 0
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, i32 421, i32 42
+ ret i32 %sel
+}
+
+define i32 @select_C1_C2_zeroext(i1 zeroext %cond) {
+; ISEL-LABEL: select_C1_C2_zeroext:
+; ISEL: # BB#0:
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: li 4, 421
+; ISEL-NEXT: li 3, 42
+; ISEL-NEXT: isel 3, 4, 3, 1
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: select_C1_C2_zeroext:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: li 4, 421
+; NO_ISEL-NEXT: li 3, 42
+; NO_ISEL-NEXT: bc 12, 1, .LBB19_1
+; NO_ISEL-NEXT: blr
+; NO_ISEL-NEXT: .LBB19_1:
+; NO_ISEL-NEXT: addi 3, 4, 0
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, i32 421, i32 42
+ ret i32 %sel
+}
+
+define i32 @select_C1_C2_signext(i1 signext %cond) {
+; ISEL-LABEL: select_C1_C2_signext:
+; ISEL: # BB#0:
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: li 4, 421
+; ISEL-NEXT: li 3, 42
+; ISEL-NEXT: isel 3, 4, 3, 1
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: select_C1_C2_signext:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: li 4, 421
+; NO_ISEL-NEXT: li 3, 42
+; NO_ISEL-NEXT: bc 12, 1, .LBB20_1
+; NO_ISEL-NEXT: blr
+; NO_ISEL-NEXT: .LBB20_1:
+; NO_ISEL-NEXT: addi 3, 4, 0
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, i32 421, i32 42
+ ret i32 %sel
+}
+
+; A binary operator with constant after the select should always get folded into the select.
+
+define i8 @sel_constants_add_constant(i1 %cond) {
+; ISEL-LABEL: sel_constants_add_constant:
+; ISEL: # BB#0:
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: li 4, 1
+; ISEL-NEXT: li 3, 28
+; ISEL-NEXT: isel 3, 4, 3, 1
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: sel_constants_add_constant:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: li 4, 1
+; NO_ISEL-NEXT: li 3, 28
+; NO_ISEL-NEXT: bc 12, 1, .LBB21_1
+; NO_ISEL-NEXT: blr
+; NO_ISEL-NEXT: .LBB21_1:
+; NO_ISEL-NEXT: addi 3, 4, 0
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, i8 -4, i8 23
+ %bo = add i8 %sel, 5
+ ret i8 %bo
+}
+
+define i8 @sel_constants_sub_constant(i1 %cond) {
+; ISEL-LABEL: sel_constants_sub_constant:
+; ISEL: # BB#0:
+; ISEL-NEXT: li 4, 0
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: oris 3, 4, 65535
+; ISEL-NEXT: li 4, 18
+; ISEL-NEXT: ori 3, 3, 65527
+; ISEL-NEXT: isel 3, 3, 4, 1
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: sel_constants_sub_constant:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: li 4, 0
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: oris 3, 4, 65535
+; NO_ISEL-NEXT: li 4, 18
+; NO_ISEL-NEXT: ori 3, 3, 65527
+; NO_ISEL-NEXT: bclr 12, 1, 0
+; NO_ISEL-NEXT: # BB#1:
+; NO_ISEL-NEXT: ori 3, 4, 0
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, i8 -4, i8 23
+ %bo = sub i8 %sel, 5
+ ret i8 %bo
+}
+
+define i8 @sel_constants_mul_constant(i1 %cond) {
+; ISEL-LABEL: sel_constants_mul_constant:
+; ISEL: # BB#0:
+; ISEL-NEXT: lis 4, 16383
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: ori 3, 4, 65531
+; ISEL-NEXT: li 4, 115
+; ISEL-NEXT: sldi 3, 3, 2
+; ISEL-NEXT: isel 3, 3, 4, 1
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: sel_constants_mul_constant:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: lis 4, 16383
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: ori 3, 4, 65531
+; NO_ISEL-NEXT: li 4, 115
+; NO_ISEL-NEXT: sldi 3, 3, 2
+; NO_ISEL-NEXT: bclr 12, 1, 0
+; NO_ISEL-NEXT: # BB#1:
+; NO_ISEL-NEXT: ori 3, 4, 0
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, i8 -4, i8 23
+ %bo = mul i8 %sel, 5
+ ret i8 %bo
+}
+
+define i8 @sel_constants_sdiv_constant(i1 %cond) {
+; ISEL-LABEL: sel_constants_sdiv_constant:
+; ISEL: # BB#0:
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: li 3, 4
+; ISEL-NEXT: isel 3, 0, 3, 1
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: sel_constants_sdiv_constant:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: li 3, 4
+; NO_ISEL-NEXT: bc 12, 1, .LBB24_1
+; NO_ISEL-NEXT: blr
+; NO_ISEL-NEXT: .LBB24_1:
+; NO_ISEL-NEXT: addi 3, 0, 0
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, i8 -4, i8 23
+ %bo = sdiv i8 %sel, 5
+ ret i8 %bo
+}
+
+define i8 @sel_constants_udiv_constant(i1 %cond) {
+; ISEL-LABEL: sel_constants_udiv_constant:
+; ISEL: # BB#0:
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: li 4, 50
+; ISEL-NEXT: li 3, 4
+; ISEL-NEXT: isel 3, 4, 3, 1
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: sel_constants_udiv_constant:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: li 4, 50
+; NO_ISEL-NEXT: li 3, 4
+; NO_ISEL-NEXT: bc 12, 1, .LBB25_1
+; NO_ISEL-NEXT: blr
+; NO_ISEL-NEXT: .LBB25_1:
+; NO_ISEL-NEXT: addi 3, 4, 0
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, i8 -4, i8 23
+ %bo = udiv i8 %sel, 5
+ ret i8 %bo
+}
+
+define i8 @sel_constants_srem_constant(i1 %cond) {
+; ISEL-LABEL: sel_constants_srem_constant:
+; ISEL: # BB#0:
+; ISEL-NEXT: lis 4, 16383
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: ori 3, 4, 65535
+; ISEL-NEXT: li 4, 3
+; ISEL-NEXT: sldi 3, 3, 2
+; ISEL-NEXT: isel 3, 3, 4, 1
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: sel_constants_srem_constant:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: lis 4, 16383
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: ori 3, 4, 65535
+; NO_ISEL-NEXT: li 4, 3
+; NO_ISEL-NEXT: sldi 3, 3, 2
+; NO_ISEL-NEXT: bclr 12, 1, 0
+; NO_ISEL-NEXT: # BB#1:
+; NO_ISEL-NEXT: ori 3, 4, 0
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, i8 -4, i8 23
+ %bo = srem i8 %sel, 5
+ ret i8 %bo
+}
+
+define i8 @sel_constants_urem_constant(i1 %cond) {
+; ALL-LABEL: sel_constants_urem_constant:
+; ALL: # BB#0:
+; ALL-NEXT: rlwinm 3, 3, 0, 31, 31
+; ALL-NEXT: subfic 3, 3, 3
+; ALL-NEXT: blr
+ %sel = select i1 %cond, i8 -4, i8 23
+ %bo = urem i8 %sel, 5
+ ret i8 %bo
+}
+
+define i8 @sel_constants_and_constant(i1 %cond) {
+; ALL-LABEL: sel_constants_and_constant:
+; ALL: # BB#0:
+; ALL-NEXT: rlwinm 3, 3, 0, 31, 31
+; ALL-NEXT: subfic 3, 3, 5
+; ALL-NEXT: blr
+ %sel = select i1 %cond, i8 -4, i8 23
+ %bo = and i8 %sel, 5
+ ret i8 %bo
+}
+
+define i8 @sel_constants_or_constant(i1 %cond) {
+; ISEL-LABEL: sel_constants_or_constant:
+; ISEL: # BB#0:
+; ISEL-NEXT: li 4, 0
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: oris 3, 4, 65535
+; ISEL-NEXT: li 4, 23
+; ISEL-NEXT: ori 3, 3, 65533
+; ISEL-NEXT: isel 3, 3, 4, 1
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: sel_constants_or_constant:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: li 4, 0
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: oris 3, 4, 65535
+; NO_ISEL-NEXT: li 4, 23
+; NO_ISEL-NEXT: ori 3, 3, 65533
+; NO_ISEL-NEXT: bclr 12, 1, 0
+; NO_ISEL-NEXT: # BB#1:
+; NO_ISEL-NEXT: ori 3, 4, 0
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, i8 -4, i8 23
+ %bo = or i8 %sel, 5
+ ret i8 %bo
+}
+
+define i8 @sel_constants_xor_constant(i1 %cond) {
+; ISEL-LABEL: sel_constants_xor_constant:
+; ISEL: # BB#0:
+; ISEL-NEXT: li 4, 0
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: oris 3, 4, 65535
+; ISEL-NEXT: li 4, 18
+; ISEL-NEXT: ori 3, 3, 65529
+; ISEL-NEXT: isel 3, 3, 4, 1
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: sel_constants_xor_constant:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: li 4, 0
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: oris 3, 4, 65535
+; NO_ISEL-NEXT: li 4, 18
+; NO_ISEL-NEXT: ori 3, 3, 65529
+; NO_ISEL-NEXT: bclr 12, 1, 0
+; NO_ISEL-NEXT: # BB#1:
+; NO_ISEL-NEXT: ori 3, 4, 0
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, i8 -4, i8 23
+ %bo = xor i8 %sel, 5
+ ret i8 %bo
+}
+
+define i8 @sel_constants_shl_constant(i1 %cond) {
+; ISEL-LABEL: sel_constants_shl_constant:
+; ISEL: # BB#0:
+; ISEL-NEXT: lis 5, 511
+; ISEL-NEXT: lis 4, 2047
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: ori 3, 4, 65535
+; ISEL-NEXT: ori 12, 5, 65535
+; ISEL-NEXT: sldi 3, 3, 5
+; ISEL-NEXT: sldi 4, 12, 7
+; ISEL-NEXT: isel 3, 4, 3, 1
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: sel_constants_shl_constant:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: lis 5, 511
+; NO_ISEL-NEXT: lis 4, 2047
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: ori 3, 4, 65535
+; NO_ISEL-NEXT: ori 12, 5, 65535
+; NO_ISEL-NEXT: sldi 3, 3, 5
+; NO_ISEL-NEXT: sldi 4, 12, 7
+; NO_ISEL-NEXT: bc 12, 1, .LBB31_1
+; NO_ISEL-NEXT: blr
+; NO_ISEL-NEXT: .LBB31_1:
+; NO_ISEL-NEXT: addi 3, 4, 0
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, i8 -4, i8 23
+ %bo = shl i8 %sel, 5
+ ret i8 %bo
+}
+
+define i8 @sel_constants_lshr_constant(i1 %cond) {
+; ISEL-LABEL: sel_constants_lshr_constant:
+; ISEL: # BB#0:
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: li 4, 7
+; ISEL-NEXT: li 3, 0
+; ISEL-NEXT: isel 3, 4, 3, 1
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: sel_constants_lshr_constant:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: li 4, 7
+; NO_ISEL-NEXT: li 3, 0
+; NO_ISEL-NEXT: bc 12, 1, .LBB32_1
+; NO_ISEL-NEXT: blr
+; NO_ISEL-NEXT: .LBB32_1:
+; NO_ISEL-NEXT: addi 3, 4, 0
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, i8 -4, i8 23
+ %bo = lshr i8 %sel, 5
+ ret i8 %bo
+}
+
+define i8 @sel_constants_ashr_constant(i1 %cond) {
+; ALL-LABEL: sel_constants_ashr_constant:
+; ALL: # BB#0:
+; ALL-NEXT: clrldi 3, 3, 63
+; ALL-NEXT: neg 3, 3
+; ALL-NEXT: blr
+ %sel = select i1 %cond, i8 -4, i8 23
+ %bo = ashr i8 %sel, 5
+ ret i8 %bo
+}
+
+define double @sel_constants_fadd_constant(i1 %cond) {
+; ISEL-LABEL: sel_constants_fadd_constant:
+; ISEL: # BB#0:
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: addis 4, 2, .LCPI34_0@toc@ha
+; ISEL-NEXT: addis 3, 2, .LCPI34_1@toc@ha
+; ISEL-NEXT: addi 4, 4, .LCPI34_0@toc@l
+; ISEL-NEXT: addi 3, 3, .LCPI34_1@toc@l
+; ISEL-NEXT: isel 3, 3, 4, 1
+; ISEL-NEXT: lxsdx 1, 0, 3
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: sel_constants_fadd_constant:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: addis 4, 2, .LCPI34_0@toc@ha
+; NO_ISEL-NEXT: addis 3, 2, .LCPI34_1@toc@ha
+; NO_ISEL-NEXT: addi 4, 4, .LCPI34_0@toc@l
+; NO_ISEL-NEXT: addi 3, 3, .LCPI34_1@toc@l
+; NO_ISEL-NEXT: bc 12, 1, .LBB34_2
+; NO_ISEL-NEXT: # BB#1:
+; NO_ISEL-NEXT: ori 3, 4, 0
+; NO_ISEL-NEXT: b .LBB34_2
+; NO_ISEL-NEXT: .LBB34_2:
+; NO_ISEL-NEXT: lxsdx 1, 0, 3
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, double -4.0, double 23.3
+ %bo = fadd double %sel, 5.1
+ ret double %bo
+}
+
+define double @sel_constants_fsub_constant(i1 %cond) {
+; ISEL-LABEL: sel_constants_fsub_constant:
+; ISEL: # BB#0:
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: addis 4, 2, .LCPI35_0@toc@ha
+; ISEL-NEXT: addis 3, 2, .LCPI35_1@toc@ha
+; ISEL-NEXT: addi 4, 4, .LCPI35_0@toc@l
+; ISEL-NEXT: addi 3, 3, .LCPI35_1@toc@l
+; ISEL-NEXT: isel 3, 3, 4, 1
+; ISEL-NEXT: lxsdx 1, 0, 3
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: sel_constants_fsub_constant:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: addis 4, 2, .LCPI35_0@toc@ha
+; NO_ISEL-NEXT: addis 3, 2, .LCPI35_1@toc@ha
+; NO_ISEL-NEXT: addi 4, 4, .LCPI35_0@toc@l
+; NO_ISEL-NEXT: addi 3, 3, .LCPI35_1@toc@l
+; NO_ISEL-NEXT: bc 12, 1, .LBB35_2
+; NO_ISEL-NEXT: # BB#1:
+; NO_ISEL-NEXT: ori 3, 4, 0
+; NO_ISEL-NEXT: b .LBB35_2
+; NO_ISEL-NEXT: .LBB35_2:
+; NO_ISEL-NEXT: lxsdx 1, 0, 3
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, double -4.0, double 23.3
+ %bo = fsub double %sel, 5.1
+ ret double %bo
+}
+
+define double @sel_constants_fmul_constant(i1 %cond) {
+; ISEL-LABEL: sel_constants_fmul_constant:
+; ISEL: # BB#0:
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: addis 4, 2, .LCPI36_0@toc@ha
+; ISEL-NEXT: addis 3, 2, .LCPI36_1@toc@ha
+; ISEL-NEXT: addi 4, 4, .LCPI36_0@toc@l
+; ISEL-NEXT: addi 3, 3, .LCPI36_1@toc@l
+; ISEL-NEXT: isel 3, 3, 4, 1
+; ISEL-NEXT: lxsdx 1, 0, 3
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: sel_constants_fmul_constant:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: addis 4, 2, .LCPI36_0@toc@ha
+; NO_ISEL-NEXT: addis 3, 2, .LCPI36_1@toc@ha
+; NO_ISEL-NEXT: addi 4, 4, .LCPI36_0@toc@l
+; NO_ISEL-NEXT: addi 3, 3, .LCPI36_1@toc@l
+; NO_ISEL-NEXT: bc 12, 1, .LBB36_2
+; NO_ISEL-NEXT: # BB#1:
+; NO_ISEL-NEXT: ori 3, 4, 0
+; NO_ISEL-NEXT: b .LBB36_2
+; NO_ISEL-NEXT: .LBB36_2:
+; NO_ISEL-NEXT: lxsdx 1, 0, 3
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, double -4.0, double 23.3
+ %bo = fmul double %sel, 5.1
+ ret double %bo
+}
+
+define double @sel_constants_fdiv_constant(i1 %cond) {
+; ISEL-LABEL: sel_constants_fdiv_constant:
+; ISEL: # BB#0:
+; ISEL-NEXT: andi. 3, 3, 1
+; ISEL-NEXT: addis 4, 2, .LCPI37_0@toc@ha
+; ISEL-NEXT: addis 3, 2, .LCPI37_1@toc@ha
+; ISEL-NEXT: addi 4, 4, .LCPI37_0@toc@l
+; ISEL-NEXT: addi 3, 3, .LCPI37_1@toc@l
+; ISEL-NEXT: isel 3, 3, 4, 1
+; ISEL-NEXT: lxsdx 1, 0, 3
+; ISEL-NEXT: blr
+;
+; NO_ISEL-LABEL: sel_constants_fdiv_constant:
+; NO_ISEL: # BB#0:
+; NO_ISEL-NEXT: andi. 3, 3, 1
+; NO_ISEL-NEXT: addis 4, 2, .LCPI37_0@toc@ha
+; NO_ISEL-NEXT: addis 3, 2, .LCPI37_1@toc@ha
+; NO_ISEL-NEXT: addi 4, 4, .LCPI37_0@toc@l
+; NO_ISEL-NEXT: addi 3, 3, .LCPI37_1@toc@l
+; NO_ISEL-NEXT: bc 12, 1, .LBB37_2
+; NO_ISEL-NEXT: # BB#1:
+; NO_ISEL-NEXT: ori 3, 4, 0
+; NO_ISEL-NEXT: b .LBB37_2
+; NO_ISEL-NEXT: .LBB37_2:
+; NO_ISEL-NEXT: lxsdx 1, 0, 3
+; NO_ISEL-NEXT: blr
+ %sel = select i1 %cond, double -4.0, double 23.3
+ %bo = fdiv double %sel, 5.1
+ ret double %bo
+}
+
+define double @sel_constants_frem_constant(i1 %cond) {
+; ALL-LABEL: sel_constants_frem_constant:
+; ALL: # BB#0:
+; ALL-NEXT: andi. 3, 3, 1
+; ALL-NEXT: bc 12, 1, .LBB38_2
+; ALL-NEXT: # BB#1:
+; ALL-NEXT: addis 3, 2, .LCPI38_0@toc@ha
+; ALL-NEXT: addi 3, 3, .LCPI38_0@toc@l
+; ALL-NEXT: lxsdx 1, 0, 3
+; ALL-NEXT: blr
+; ALL-NEXT: .LBB38_2:
+; ALL-NEXT: addis 3, 2, .LCPI38_1@toc@ha
+; ALL-NEXT: addi 3, 3, .LCPI38_1@toc@l
+; ALL-NEXT: lxsspx 1, 0, 3
+; ALL-NEXT: blr
+ %sel = select i1 %cond, double -4.0, double 23.3
+ %bo = frem double %sel, 5.1
+ ret double %bo
+}
+
diff --git a/test/CodeGen/PowerPC/setcc-logic.ll b/test/CodeGen/PowerPC/setcc-logic.ll
new file mode 100644
index 000000000000..2ed08e2ae380
--- /dev/null
+++ b/test/CodeGen/PowerPC/setcc-logic.ll
@@ -0,0 +1,478 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -verify-machineinstrs -mtriple=powerpc64le-unknown-unknown | FileCheck %s
+
+define zeroext i1 @all_bits_clear(i32 %P, i32 %Q) {
+; CHECK-LABEL: all_bits_clear:
+; CHECK: # BB#0:
+; CHECK-NEXT: or 3, 3, 4
+; CHECK-NEXT: cntlzw 3, 3
+; CHECK-NEXT: rlwinm 3, 3, 27, 31, 31
+; CHECK-NEXT: blr
+ %a = icmp eq i32 %P, 0
+ %b = icmp eq i32 %Q, 0
+ %c = and i1 %a, %b
+ ret i1 %c
+}
+
+define zeroext i1 @all_sign_bits_clear(i32 %P, i32 %Q) {
+; CHECK-LABEL: all_sign_bits_clear:
+; CHECK: # BB#0:
+; CHECK-NEXT: or 3, 3, 4
+; CHECK-NEXT: nor 3, 3, 3
+; CHECK-NEXT: srwi 3, 3, 31
+; CHECK-NEXT: blr
+ %a = icmp sgt i32 %P, -1
+ %b = icmp sgt i32 %Q, -1
+ %c = and i1 %a, %b
+ ret i1 %c
+}
+
+define zeroext i1 @all_bits_set(i32 %P, i32 %Q) {
+; CHECK-LABEL: all_bits_set:
+; CHECK: # BB#0:
+; CHECK-NEXT: and 3, 3, 4
+; CHECK-NEXT: li 5, 0
+; CHECK-NEXT: li 12, 1
+; CHECK-NEXT: cmpwi 0, 3, -1
+; CHECK-NEXT: isel 3, 12, 5, 2
+; CHECK-NEXT: blr
+ %a = icmp eq i32 %P, -1
+ %b = icmp eq i32 %Q, -1
+ %c = and i1 %a, %b
+ ret i1 %c
+}
+
+define zeroext i1 @all_sign_bits_set(i32 %P, i32 %Q) {
+; CHECK-LABEL: all_sign_bits_set:
+; CHECK: # BB#0:
+; CHECK-NEXT: and 3, 3, 4
+; CHECK-NEXT: srwi 3, 3, 31
+; CHECK-NEXT: blr
+ %a = icmp slt i32 %P, 0
+ %b = icmp slt i32 %Q, 0
+ %c = and i1 %a, %b
+ ret i1 %c
+}
+
+define zeroext i1 @any_bits_set(i32 %P, i32 %Q) {
+; CHECK-LABEL: any_bits_set:
+; CHECK: # BB#0:
+; CHECK-NEXT: or 3, 3, 4
+; CHECK-NEXT: cntlzw 3, 3
+; CHECK-NEXT: nor 3, 3, 3
+; CHECK-NEXT: rlwinm 3, 3, 27, 31, 31
+; CHECK-NEXT: blr
+ %a = icmp ne i32 %P, 0
+ %b = icmp ne i32 %Q, 0
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+define zeroext i1 @any_sign_bits_set(i32 %P, i32 %Q) {
+; CHECK-LABEL: any_sign_bits_set:
+; CHECK: # BB#0:
+; CHECK-NEXT: or 3, 3, 4
+; CHECK-NEXT: srwi 3, 3, 31
+; CHECK-NEXT: blr
+ %a = icmp slt i32 %P, 0
+ %b = icmp slt i32 %Q, 0
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+define zeroext i1 @any_bits_clear(i32 %P, i32 %Q) {
+; CHECK-LABEL: any_bits_clear:
+; CHECK: # BB#0:
+; CHECK-NEXT: and 3, 3, 4
+; CHECK-NEXT: li 5, 1
+; CHECK-NEXT: cmpwi 0, 3, -1
+; CHECK-NEXT: isel 3, 0, 5, 2
+; CHECK-NEXT: blr
+ %a = icmp ne i32 %P, -1
+ %b = icmp ne i32 %Q, -1
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+define zeroext i1 @any_sign_bits_clear(i32 %P, i32 %Q) {
+; CHECK-LABEL: any_sign_bits_clear:
+; CHECK: # BB#0:
+; CHECK-NEXT: and 3, 3, 4
+; CHECK-NEXT: nor 3, 3, 3
+; CHECK-NEXT: srwi 3, 3, 31
+; CHECK-NEXT: blr
+ %a = icmp sgt i32 %P, -1
+ %b = icmp sgt i32 %Q, -1
+ %c = or i1 %a, %b
+ ret i1 %c
+}
+
+; PR3351 - (P == 0) & (Q == 0) -> (P|Q) == 0
+define i32 @all_bits_clear_branch(i32* %P, i32* %Q) {
+; CHECK-LABEL: all_bits_clear_branch:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: or. 3, 3, 4
+; CHECK-NEXT: bne 0, .LBB8_2
+; CHECK-NEXT: # BB#1: # %bb1
+; CHECK-NEXT: li 3, 4
+; CHECK-NEXT: blr
+; CHECK-NEXT: .LBB8_2: # %return
+; CHECK-NEXT: li 3, 192
+; CHECK-NEXT: blr
+entry:
+ %a = icmp eq i32* %P, null
+ %b = icmp eq i32* %Q, null
+ %c = and i1 %a, %b
+ br i1 %c, label %bb1, label %return
+
+bb1:
+ ret i32 4
+
+return:
+ ret i32 192
+}
+
+define i32 @all_sign_bits_clear_branch(i32 %P, i32 %Q) {
+; CHECK-LABEL: all_sign_bits_clear_branch:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: or 3, 3, 4
+; CHECK-NEXT: cmpwi 0, 3, 0
+; CHECK-NEXT: blt 0, .LBB9_2
+; CHECK-NEXT: # BB#1: # %bb1
+; CHECK-NEXT: li 3, 4
+; CHECK-NEXT: blr
+; CHECK-NEXT: .LBB9_2: # %return
+; CHECK-NEXT: li 3, 192
+; CHECK-NEXT: blr
+entry:
+ %a = icmp sgt i32 %P, -1
+ %b = icmp sgt i32 %Q, -1
+ %c = and i1 %a, %b
+ br i1 %c, label %bb1, label %return
+
+bb1:
+ ret i32 4
+
+return:
+ ret i32 192
+}
+
+define i32 @all_bits_set_branch(i32 %P, i32 %Q) {
+; CHECK-LABEL: all_bits_set_branch:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: and 3, 3, 4
+; CHECK-NEXT: cmpwi 0, 3, -1
+; CHECK-NEXT: bne 0, .LBB10_2
+; CHECK-NEXT: # BB#1: # %bb1
+; CHECK-NEXT: li 3, 4
+; CHECK-NEXT: blr
+; CHECK-NEXT: .LBB10_2: # %return
+; CHECK-NEXT: li 3, 192
+; CHECK-NEXT: blr
+entry:
+ %a = icmp eq i32 %P, -1
+ %b = icmp eq i32 %Q, -1
+ %c = and i1 %a, %b
+ br i1 %c, label %bb1, label %return
+
+bb1:
+ ret i32 4
+
+return:
+ ret i32 192
+}
+
+define i32 @all_sign_bits_set_branch(i32 %P, i32 %Q) {
+; CHECK-LABEL: all_sign_bits_set_branch:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: and 3, 3, 4
+; CHECK-NEXT: cmpwi 0, 3, -1
+; CHECK-NEXT: bgt 0, .LBB11_2
+; CHECK-NEXT: # BB#1: # %bb1
+; CHECK-NEXT: li 3, 4
+; CHECK-NEXT: blr
+; CHECK-NEXT: .LBB11_2: # %return
+; CHECK-NEXT: li 3, 192
+; CHECK-NEXT: blr
+entry:
+ %a = icmp slt i32 %P, 0
+ %b = icmp slt i32 %Q, 0
+ %c = and i1 %a, %b
+ br i1 %c, label %bb1, label %return
+
+bb1:
+ ret i32 4
+
+return:
+ ret i32 192
+}
+
+; PR3351 - (P != 0) | (Q != 0) -> (P|Q) != 0
+define i32 @any_bits_set_branch(i32* %P, i32* %Q) {
+; CHECK-LABEL: any_bits_set_branch:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: or. 3, 3, 4
+; CHECK-NEXT: beq 0, .LBB12_2
+; CHECK-NEXT: # BB#1: # %bb1
+; CHECK-NEXT: li 3, 4
+; CHECK-NEXT: blr
+; CHECK-NEXT: .LBB12_2: # %return
+; CHECK-NEXT: li 3, 192
+; CHECK-NEXT: blr
+entry:
+ %a = icmp ne i32* %P, null
+ %b = icmp ne i32* %Q, null
+ %c = or i1 %a, %b
+ br i1 %c, label %bb1, label %return
+
+bb1:
+ ret i32 4
+
+return:
+ ret i32 192
+}
+
+define i32 @any_sign_bits_set_branch(i32 %P, i32 %Q) {
+; CHECK-LABEL: any_sign_bits_set_branch:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: or 3, 3, 4
+; CHECK-NEXT: cmpwi 0, 3, -1
+; CHECK-NEXT: bgt 0, .LBB13_2
+; CHECK-NEXT: # BB#1: # %bb1
+; CHECK-NEXT: li 3, 4
+; CHECK-NEXT: blr
+; CHECK-NEXT: .LBB13_2: # %return
+; CHECK-NEXT: li 3, 192
+; CHECK-NEXT: blr
+entry:
+ %a = icmp slt i32 %P, 0
+ %b = icmp slt i32 %Q, 0
+ %c = or i1 %a, %b
+ br i1 %c, label %bb1, label %return
+
+bb1:
+ ret i32 4
+
+return:
+ ret i32 192
+}
+
+define i32 @any_bits_clear_branch(i32 %P, i32 %Q) {
+; CHECK-LABEL: any_bits_clear_branch:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: and 3, 3, 4
+; CHECK-NEXT: cmpwi 0, 3, -1
+; CHECK-NEXT: beq 0, .LBB14_2
+; CHECK-NEXT: # BB#1: # %bb1
+; CHECK-NEXT: li 3, 4
+; CHECK-NEXT: blr
+; CHECK-NEXT: .LBB14_2: # %return
+; CHECK-NEXT: li 3, 192
+; CHECK-NEXT: blr
+entry:
+ %a = icmp ne i32 %P, -1
+ %b = icmp ne i32 %Q, -1
+ %c = or i1 %a, %b
+ br i1 %c, label %bb1, label %return
+
+bb1:
+ ret i32 4
+
+return:
+ ret i32 192
+}
+
+define i32 @any_sign_bits_clear_branch(i32 %P, i32 %Q) {
+; CHECK-LABEL: any_sign_bits_clear_branch:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: and 3, 3, 4
+; CHECK-NEXT: cmpwi 0, 3, 0
+; CHECK-NEXT: blt 0, .LBB15_2
+; CHECK-NEXT: # BB#1: # %bb1
+; CHECK-NEXT: li 3, 4
+; CHECK-NEXT: blr
+; CHECK-NEXT: .LBB15_2: # %return
+; CHECK-NEXT: li 3, 192
+; CHECK-NEXT: blr
+entry:
+ %a = icmp sgt i32 %P, -1
+ %b = icmp sgt i32 %Q, -1
+ %c = or i1 %a, %b
+ br i1 %c, label %bb1, label %return
+
+bb1:
+ ret i32 4
+
+return:
+ ret i32 192
+}
+
+define <4 x i1> @all_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) {
+; CHECK-LABEL: all_bits_clear_vec:
+; CHECK: # BB#0:
+; CHECK-NEXT: xxlxor 36, 36, 36
+; CHECK-NEXT: xxlor 34, 34, 35
+; CHECK-NEXT: vcmpequw 2, 2, 4
+; CHECK-NEXT: blr
+ %a = icmp eq <4 x i32> %P, zeroinitializer
+ %b = icmp eq <4 x i32> %Q, zeroinitializer
+ %c = and <4 x i1> %a, %b
+ ret <4 x i1> %c
+}
+
+define <4 x i1> @all_sign_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) {
+; CHECK-LABEL: all_sign_bits_clear_vec:
+; CHECK: # BB#0:
+; CHECK-NEXT: vspltisb 4, -1
+; CHECK-NEXT: xxlor 34, 34, 35
+; CHECK-NEXT: vcmpgtsw 2, 2, 4
+; CHECK-NEXT: blr
+ %a = icmp sgt <4 x i32> %P, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %b = icmp sgt <4 x i32> %Q, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %c = and <4 x i1> %a, %b
+ ret <4 x i1> %c
+}
+
+define <4 x i1> @all_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) {
+; CHECK-LABEL: all_bits_set_vec:
+; CHECK: # BB#0:
+; CHECK-NEXT: vspltisb 4, -1
+; CHECK-NEXT: xxland 34, 34, 35
+; CHECK-NEXT: vcmpequw 2, 2, 4
+; CHECK-NEXT: blr
+ %a = icmp eq <4 x i32> %P, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %b = icmp eq <4 x i32> %Q, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %c = and <4 x i1> %a, %b
+ ret <4 x i1> %c
+}
+
+define <4 x i1> @all_sign_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) {
+; CHECK-LABEL: all_sign_bits_set_vec:
+; CHECK: # BB#0:
+; CHECK-NEXT: xxlxor 36, 36, 36
+; CHECK-NEXT: xxland 34, 34, 35
+; CHECK-NEXT: vcmpgtsw 2, 4, 2
+; CHECK-NEXT: blr
+ %a = icmp slt <4 x i32> %P, zeroinitializer
+ %b = icmp slt <4 x i32> %Q, zeroinitializer
+ %c = and <4 x i1> %a, %b
+ ret <4 x i1> %c
+}
+
+define <4 x i1> @any_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) {
+; CHECK-LABEL: any_bits_set_vec:
+; CHECK: # BB#0:
+; CHECK-NEXT: xxlxor 36, 36, 36
+; CHECK-NEXT: xxlor 34, 34, 35
+; CHECK-NEXT: vcmpequw 2, 2, 4
+; CHECK-NEXT: xxlnor 34, 34, 34
+; CHECK-NEXT: blr
+ %a = icmp ne <4 x i32> %P, zeroinitializer
+ %b = icmp ne <4 x i32> %Q, zeroinitializer
+ %c = or <4 x i1> %a, %b
+ ret <4 x i1> %c
+}
+
+define <4 x i1> @any_sign_bits_set_vec(<4 x i32> %P, <4 x i32> %Q) {
+; CHECK-LABEL: any_sign_bits_set_vec:
+; CHECK: # BB#0:
+; CHECK-NEXT: xxlxor 36, 36, 36
+; CHECK-NEXT: xxlor 34, 34, 35
+; CHECK-NEXT: vcmpgtsw 2, 4, 2
+; CHECK-NEXT: blr
+ %a = icmp slt <4 x i32> %P, zeroinitializer
+ %b = icmp slt <4 x i32> %Q, zeroinitializer
+ %c = or <4 x i1> %a, %b
+ ret <4 x i1> %c
+}
+
+define <4 x i1> @any_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) {
+; CHECK-LABEL: any_bits_clear_vec:
+; CHECK: # BB#0:
+; CHECK-NEXT: vspltisb 4, -1
+; CHECK-NEXT: xxland 34, 34, 35
+; CHECK-NEXT: vcmpequw 2, 2, 4
+; CHECK-NEXT: xxlnor 34, 34, 34
+; CHECK-NEXT: blr
+ %a = icmp ne <4 x i32> %P, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %b = icmp ne <4 x i32> %Q, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %c = or <4 x i1> %a, %b
+ ret <4 x i1> %c
+}
+
+define <4 x i1> @any_sign_bits_clear_vec(<4 x i32> %P, <4 x i32> %Q) {
+; CHECK-LABEL: any_sign_bits_clear_vec:
+; CHECK: # BB#0:
+; CHECK-NEXT: vspltisb 4, -1
+; CHECK-NEXT: xxland 34, 34, 35
+; CHECK-NEXT: vcmpgtsw 2, 2, 4
+; CHECK-NEXT: blr
+ %a = icmp sgt <4 x i32> %P, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %b = icmp sgt <4 x i32> %Q, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %c = or <4 x i1> %a, %b
+ ret <4 x i1> %c
+}
+
+define zeroext i1 @ne_neg1_and_ne_zero(i64 %x) {
+; CHECK-LABEL: ne_neg1_and_ne_zero:
+; CHECK: # BB#0:
+; CHECK-NEXT: addi 3, 3, 1
+; CHECK-NEXT: li 4, 0
+; CHECK-NEXT: li 12, 1
+; CHECK-NEXT: cmpldi 3, 1
+; CHECK-NEXT: isel 3, 12, 4, 1
+; CHECK-NEXT: blr
+ %cmp1 = icmp ne i64 %x, -1
+ %cmp2 = icmp ne i64 %x, 0
+ %and = and i1 %cmp1, %cmp2
+ ret i1 %and
+}
+
+; PR32401 - https://bugs.llvm.org/show_bug.cgi?id=32401
+
+define zeroext i1 @and_eq(i16 zeroext %a, i16 zeroext %b, i16 zeroext %c, i16 zeroext %d) {
+; CHECK-LABEL: and_eq:
+; CHECK: # BB#0:
+; CHECK-NEXT: xor 5, 5, 6
+; CHECK-NEXT: xor 3, 3, 4
+; CHECK-NEXT: or 3, 3, 5
+; CHECK-NEXT: cntlzw 3, 3
+; CHECK-NEXT: rlwinm 3, 3, 27, 31, 31
+; CHECK-NEXT: blr
+ %cmp1 = icmp eq i16 %a, %b
+ %cmp2 = icmp eq i16 %c, %d
+ %and = and i1 %cmp1, %cmp2
+ ret i1 %and
+}
+
+define zeroext i1 @or_ne(i32 %a, i32 %b, i32 %c, i32 %d) {
+; CHECK-LABEL: or_ne:
+; CHECK: # BB#0:
+; CHECK-NEXT: xor 5, 5, 6
+; CHECK-NEXT: xor 3, 3, 4
+; CHECK-NEXT: or 3, 3, 5
+; CHECK-NEXT: cntlzw 3, 3
+; CHECK-NEXT: nor 3, 3, 3
+; CHECK-NEXT: rlwinm 3, 3, 27, 31, 31
+; CHECK-NEXT: blr
+ %cmp1 = icmp ne i32 %a, %b
+ %cmp2 = icmp ne i32 %c, %d
+ %or = or i1 %cmp1, %cmp2
+ ret i1 %or
+}
+
+; This should not be transformed because vector compares + bitwise logic are faster.
+
+define <4 x i1> @and_eq_vec(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) {
+; CHECK-LABEL: and_eq_vec:
+; CHECK: # BB#0:
+; CHECK-NEXT: vcmpequw 2, 2, 3
+; CHECK-NEXT: vcmpequw 19, 4, 5
+; CHECK-NEXT: xxland 34, 34, 51
+; CHECK-NEXT: blr
+ %cmp1 = icmp eq <4 x i32> %a, %b
+ %cmp2 = icmp eq <4 x i32> %c, %d
+ %and = and <4 x i1> %cmp1, %cmp2
+ ret <4 x i1> %and
+}
+
diff --git a/test/CodeGen/PowerPC/setcc-to-sub.ll b/test/CodeGen/PowerPC/setcc-to-sub.ll
index 335bb403cd7f..752ebe0c9d8b 100644
--- a/test/CodeGen/PowerPC/setcc-to-sub.ll
+++ b/test/CodeGen/PowerPC/setcc-to-sub.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu \
; RUN: -mcpu=pwr8 < %s | FileCheck %s
@@ -6,6 +7,15 @@
; Function Attrs: norecurse nounwind readonly
define zeroext i1 @test1(%class.PB2* %s_a, %class.PB2* %s_b) local_unnamed_addr #0 {
+; CHECK-LABEL: test1:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: lwz 3, 0(3)
+; CHECK-NEXT: lwz 4, 0(4)
+; CHECK-NEXT: rlwinm 3, 3, 0, 28, 28
+; CHECK-NEXT: rlwinm 4, 4, 0, 28, 28
+; CHECK-NEXT: sub 3, 3, 4
+; CHECK-NEXT: rldicl 3, 3, 1, 63
+; CHECK-NEXT: blr
entry:
%arrayidx.i6 = bitcast %class.PB2* %s_a to i32*
%0 = load i32, i32* %arrayidx.i6, align 8, !tbaa !1
@@ -15,18 +25,20 @@ entry:
%and.i4 = and i32 %1, 8
%cmp.i5 = icmp ult i32 %and.i, %and.i4
ret i1 %cmp.i5
-
-; CHECK-LABEL: @test1
-; CHECK: rlwinm [[REG1:[0-9]*]]
-; CHECK-NEXT: rlwinm [[REG2:[0-9]*]]
-; CHECK-NEXT: sub [[REG3:[0-9]*]], [[REG1]], [[REG2]]
-; CHECK-NEXT: rldicl 3, [[REG3]]
-; CHECK: blr
-
}
; Function Attrs: norecurse nounwind readonly
define zeroext i1 @test2(%class.PB2* %s_a, %class.PB2* %s_b) local_unnamed_addr #0 {
+; CHECK-LABEL: test2:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: lwz 3, 0(3)
+; CHECK-NEXT: lwz 4, 0(4)
+; CHECK-NEXT: rlwinm 3, 3, 0, 28, 28
+; CHECK-NEXT: rlwinm 4, 4, 0, 28, 28
+; CHECK-NEXT: sub 3, 4, 3
+; CHECK-NEXT: rldicl 3, 3, 1, 63
+; CHECK-NEXT: xori 3, 3, 1
+; CHECK-NEXT: blr
entry:
%arrayidx.i6 = bitcast %class.PB2* %s_a to i32*
%0 = load i32, i32* %arrayidx.i6, align 8, !tbaa !1
@@ -36,19 +48,19 @@ entry:
%and.i4 = and i32 %1, 8
%cmp.i5 = icmp ule i32 %and.i, %and.i4
ret i1 %cmp.i5
-
-; CHECK-LABEL: @test2
-; CHECK: rlwinm [[REG1:[0-9]*]]
-; CHECK-NEXT: rlwinm [[REG2:[0-9]*]]
-; CHECK-NEXT: sub [[REG3:[0-9]*]], [[REG2]], [[REG1]]
-; CHECK-NEXT: rldicl [[REG4:[0-9]*]], [[REG3]]
-; CHECK-NEXT: xori 3, [[REG4]], 1
-; CHECK: blr
-
}
; Function Attrs: norecurse nounwind readonly
define zeroext i1 @test3(%class.PB2* %s_a, %class.PB2* %s_b) local_unnamed_addr #0 {
+; CHECK-LABEL: test3:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: lwz 3, 0(3)
+; CHECK-NEXT: lwz 4, 0(4)
+; CHECK-NEXT: rlwinm 3, 3, 0, 28, 28
+; CHECK-NEXT: rlwinm 4, 4, 0, 28, 28
+; CHECK-NEXT: sub 3, 4, 3
+; CHECK-NEXT: rldicl 3, 3, 1, 63
+; CHECK-NEXT: blr
entry:
%arrayidx.i6 = bitcast %class.PB2* %s_a to i32*
%0 = load i32, i32* %arrayidx.i6, align 8, !tbaa !1
@@ -58,18 +70,20 @@ entry:
%and.i4 = and i32 %1, 8
%cmp.i5 = icmp ugt i32 %and.i, %and.i4
ret i1 %cmp.i5
-
-; CHECK-LABEL: @test3
-; CHECK: rlwinm [[REG1:[0-9]*]]
-; CHECK-NEXT: rlwinm [[REG2:[0-9]*]]
-; CHECK-NEXT: sub [[REG3:[0-9]*]], [[REG2]], [[REG1]]
-; CHECK-NEXT: rldicl 3, [[REG3]]
-; CHECK: blr
-
}
; Function Attrs: norecurse nounwind readonly
define zeroext i1 @test4(%class.PB2* %s_a, %class.PB2* %s_b) local_unnamed_addr #0 {
+; CHECK-LABEL: test4:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: lwz 3, 0(3)
+; CHECK-NEXT: lwz 4, 0(4)
+; CHECK-NEXT: rlwinm 3, 3, 0, 28, 28
+; CHECK-NEXT: rlwinm 4, 4, 0, 28, 28
+; CHECK-NEXT: sub 3, 3, 4
+; CHECK-NEXT: rldicl 3, 3, 1, 63
+; CHECK-NEXT: xori 3, 3, 1
+; CHECK-NEXT: blr
entry:
%arrayidx.i6 = bitcast %class.PB2* %s_a to i32*
%0 = load i32, i32* %arrayidx.i6, align 8, !tbaa !1
@@ -79,15 +93,6 @@ entry:
%and.i4 = and i32 %1, 8
%cmp.i5 = icmp uge i32 %and.i, %and.i4
ret i1 %cmp.i5
-
-; CHECK-LABEL: @test4
-; CHECK: rlwinm [[REG1:[0-9]*]]
-; CHECK-NEXT: rlwinm [[REG2:[0-9]*]]
-; CHECK-NEXT: sub [[REG3:[0-9]*]], [[REG1]], [[REG2]]
-; CHECK-NEXT: rldicl [[REG4:[0-9]*]], [[REG3]]
-; CHECK-NEXT: xori 3, [[REG4]], 1
-; CHECK: blr
-
}
!1 = !{!2, !2, i64 0}
diff --git a/test/CodeGen/PowerPC/sjlj_no0x.ll b/test/CodeGen/PowerPC/sjlj_no0x.ll
new file mode 100644
index 000000000000..2018bcbbc931
--- /dev/null
+++ b/test/CodeGen/PowerPC/sjlj_no0x.ll
@@ -0,0 +1,29 @@
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=a2 -verify-machineinstrs | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 -verify-machineinstrs | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-n32:64"
+target triple = "powerpc64le-unknown-linux-gnu"
+
+; Function Attrs: noinline nounwind
+define void @_Z23BuiltinLongJmpFunc1_bufv() #0 {
+entry:
+ call void @llvm.eh.sjlj.longjmp(i8* bitcast (void ()* @_Z23BuiltinLongJmpFunc1_bufv to i8*))
+ unreachable
+
+; CHECK: @_Z23BuiltinLongJmpFunc1_bufv
+; CHECK: addis [[REG:[0-9]+]], 2, .LC0@toc@ha
+; CHECK: ld 31, 0([[REG]])
+; CHECK: ld [[REG2:[0-9]+]], 8([[REG]])
+; CHECK-DAG: ld 1, 16([[REG]])
+; CHECK-DAG: ld 30, 32([[REG]])
+; CHECK-DAG: ld 2, 24([[REG]])
+; CHECK-DAG: mtctr [[REG2]]
+; CHECK: bctr
+
+return: ; No predecessors!
+ ret void
+}
+
+; Function Attrs: noreturn nounwind
+declare void @llvm.eh.sjlj.longjmp(i8*) #1
diff --git a/test/CodeGen/PowerPC/srl-mask.ll b/test/CodeGen/PowerPC/srl-mask.ll
index e581eae0ee57..1a429b1bae36 100644
--- a/test/CodeGen/PowerPC/srl-mask.ll
+++ b/test/CodeGen/PowerPC/srl-mask.ll
@@ -12,5 +12,16 @@ entry:
; CHECK: blr
}
+; for AND with an immediate like (x & ~0xFFFF)
+; we should use rldicl instruction
+define i64 @bar(i64 %x) #0 {
+entry:
+; CHECK-LABEL: @bar
+ %a = and i64 %x, 18446744073709486080
+; CHECK: rldicr 3, 3, 0, 47
+ ret i64 %a
+; CHECK: blr
+}
+
attributes #0 = { nounwind }
diff --git a/test/CodeGen/PowerPC/stacksize.ll b/test/CodeGen/PowerPC/stacksize.ll
new file mode 100644
index 000000000000..947aaa0fa49e
--- /dev/null
+++ b/test/CodeGen/PowerPC/stacksize.ll
@@ -0,0 +1,86 @@
+; For ELFv2 ABI, we can avoid allocating the parameter area in the stack frame of the caller function
+; if all the arguments can be passed to the callee in registers.
+; For ELFv1 ABI, we always need to allocate the parameter area.
+
+; Tests for ELFv2 ABI
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu -target-abi elfv2 < %s | FileCheck %s -check-prefix=PPC64-ELFV2
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu -target-abi elfv2 < %s | FileCheck %s -check-prefix=PPC64-ELFV2
+
+; Tests for ELFv1 ABI
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu -target-abi elfv1 < %s | FileCheck %s -check-prefix=PPC64-ELFV1
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu -target-abi elfv1 < %s | FileCheck %s -check-prefix=PPC64-ELFV1
+
+; If the callee has at most eight integer args, the parameter area can be omitted for the ELFv2 ABI.
+
+; PPC64-ELFV2-LABEL: WithoutParamArea1:
+; PPC64-ELFV2-NOT: stw {{[0-9]+}}, -{{[0-9]+}}(1)
+; PPC64-ELFV2: stdu 1, -32(1)
+; PPC64-ELFV2: addi 1, 1, 32
+; PPC64-ELFV2-NOT: lwz {{[0-9]+}}, -{{[0-9]+}}(1)
+; PPC64-ELFV1-LABEL: WithoutParamArea1:
+; PPC64-ELFV1-NOT: stw {{[0-9]+}}, -{{[0-9]+}}(1)
+; PPC64-ELFV1: stdu 1, -112(1)
+; PPC64-ELFV1: addi 1, 1, 112
+; PPC64-ELFV1-NOT: lwz {{[0-9]+}}, -{{[0-9]+}}(1)
+define signext i32 @WithoutParamArea1(i32 signext %a) local_unnamed_addr #0 {
+entry:
+ %call = tail call signext i32 @onearg(i32 signext %a) #2
+ ret i32 %call
+}
+
+; PPC64-ELFV2-LABEL: WithoutParamArea2:
+; PPC64-ELFV2-NOT: stw {{[0-9]+}}, -{{[0-9]+}}(1)
+; PPC64-ELFV2: stdu 1, -32(1)
+; PPC64-ELFV2: addi 1, 1, 32
+; PPC64-ELFV2-NOT: lwz {{[0-9]+}}, -{{[0-9]+}}(1)
+; PPC64-ELFV1-LABEL: WithoutParamArea2:
+; PPC64-ELFV1-NOT: stw {{[0-9]+}}, -{{[0-9]+}}(1)
+; PPC64-ELFV1: stdu 1, -112(1)
+; PPC64-ELFV1: addi 1, 1, 112
+; PPC64-ELFV1-NOT: lwz {{[0-9]+}}, -{{[0-9]+}}(1)
+define signext i32 @WithoutParamArea2(i32 signext %a) local_unnamed_addr #0 {
+entry:
+ %call = tail call signext i32 @eightargs(i32 signext %a, i32 signext %a, i32 signext %a, i32 signext %a, i32 signext %a, i32 signext %a, i32 signext %a, i32 signext %a) #2
+ ret i32 %call
+}
+
+; If the callee has more than eight integer args or a variable number of args,
+; the parameter area cannot be omitted even for the ELFv2 ABI
+
+; PPC64-ELFV2-LABEL: WithParamArea1:
+; PPC64-ELFV2-NOT: stw {{[0-9]+}}, -{{[0-9]+}}(1)
+; PPC64-ELFV2: stdu 1, -96(1)
+; PPC64-ELFV2: addi 1, 1, 96
+; PPC64-ELFV2-NOT: lwz {{[0-9]+}}, -{{[0-9]+}}(1)
+; PPC64-ELFV1-LABEL: WithParamArea1:
+; PPC64-ELFV1-NOT: stw {{[0-9]+}}, -{{[0-9]+}}(1)
+; PPC64-ELFV1: stdu 1, -112(1)
+; PPC64-ELFV1: addi 1, 1, 112
+; PPC64-ELFV1-NOT: lwz {{[0-9]+}}, -{{[0-9]+}}(1)
+define signext i32 @WithParamArea1(i32 signext %a) local_unnamed_addr #0 {
+entry:
+ %call = tail call signext i32 (i32, ...) @varargs(i32 signext %a, i32 signext %a) #2
+ ret i32 %call
+}
+
+; PPC64-ELFV2-LABEL: WithParamArea2:
+; PPC64-ELFV2-NOT: stw {{[0-9]+}}, -{{[0-9]+}}(1)
+; PPC64-ELFV2: stdu 1, -112(1)
+; PPC64-ELFV2: addi 1, 1, 112
+; PPC64-ELFV2-NOT: lwz {{[0-9]+}}, -{{[0-9]+}}(1)
+; PPC64-ELFV1-LABEL: WithParamArea2:
+; PPC64-ELFV1-NOT: stw {{[0-9]+}}, -{{[0-9]+}}(1)
+; PPC64-ELFV1: stdu 1, -128(1)
+; PPC64-ELFV1: addi 1, 1, 128
+; PPC64-ELFV1-NOT: lwz {{[0-9]+}}, -{{[0-9]+}}(1)
+define signext i32 @WithParamArea2(i32 signext %a) local_unnamed_addr #0 {
+entry:
+ %call = tail call signext i32 @nineargs(i32 signext %a, i32 signext %a, i32 signext %a, i32 signext %a, i32 signext %a, i32 signext %a, i32 signext %a, i32 signext %a, i32 signext %a) #2
+ ret i32 %call
+}
+
+declare signext i32 @onearg(i32 signext) local_unnamed_addr #1
+declare signext i32 @eightargs(i32 signext, i32 signext, i32 signext, i32 signext, i32 signext, i32 signext, i32 signext, i32 signext) local_unnamed_addr #1
+declare signext i32 @nineargs(i32 signext, i32 signext, i32 signext, i32 signext, i32 signext, i32 signext, i32 signext, i32 signext, i32 signext) local_unnamed_addr #1
+declare signext i32 @varargs(i32 signext, ...) local_unnamed_addr #1
+
diff --git a/test/CodeGen/PowerPC/structsinmem.ll b/test/CodeGen/PowerPC/structsinmem.ll
index 3777f3ec5bab..01b0848e7070 100644
--- a/test/CodeGen/PowerPC/structsinmem.ll
+++ b/test/CodeGen/PowerPC/structsinmem.ll
@@ -113,13 +113,13 @@ entry:
%add13 = add nsw i32 %add11, %6
ret i32 %add13
-; CHECK: lha {{[0-9]+}}, 126(1)
-; CHECK: lha {{[0-9]+}}, 132(1)
-; CHECK: lbz {{[0-9]+}}, 119(1)
-; CHECK: lwz {{[0-9]+}}, 140(1)
-; CHECK: lwz {{[0-9]+}}, 144(1)
-; CHECK: lwz {{[0-9]+}}, 152(1)
-; CHECK: lwz {{[0-9]+}}, 160(1)
+; CHECK-DAG: lha {{[0-9]+}}, 126(1)
+; CHECK-DAG: lha {{[0-9]+}}, 132(1)
+; CHECK-DAG: lbz {{[0-9]+}}, 119(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 140(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 144(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 152(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 160(1)
}
define i32 @caller2() nounwind {
@@ -205,11 +205,11 @@ entry:
%add13 = add nsw i32 %add11, %6
ret i32 %add13
-; CHECK: lha {{[0-9]+}}, 126(1)
-; CHECK: lha {{[0-9]+}}, 133(1)
-; CHECK: lbz {{[0-9]+}}, 119(1)
-; CHECK: lwz {{[0-9]+}}, 140(1)
-; CHECK: lwz {{[0-9]+}}, 147(1)
-; CHECK: lwz {{[0-9]+}}, 154(1)
-; CHECK: lwz {{[0-9]+}}, 161(1)
+; CHECK-DAG: lha {{[0-9]+}}, 126(1)
+; CHECK-DAG: lha {{[0-9]+}}, 133(1)
+; CHECK-DAG: lbz {{[0-9]+}}, 119(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 140(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 147(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 154(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 161(1)
}
diff --git a/test/CodeGen/PowerPC/structsinregs.ll b/test/CodeGen/PowerPC/structsinregs.ll
index e27041dd4c88..54679f259e9a 100644
--- a/test/CodeGen/PowerPC/structsinregs.ll
+++ b/test/CodeGen/PowerPC/structsinregs.ll
@@ -59,6 +59,7 @@ entry:
%call = call i32 @callee1(%struct.s1* byval %p1, %struct.s2* byval %p2, %struct.s3* byval %p3, %struct.s4* byval %p4, %struct.s5* byval %p5, %struct.s6* byval %p6, %struct.s7* byval %p7)
ret i32 %call
+; CHECK-LABEL: caller1
; CHECK: ld 9, 112(31)
; CHECK: ld 8, 120(31)
; CHECK: ld 7, 128(31)
@@ -97,20 +98,21 @@ entry:
%add13 = add nsw i32 %add11, %6
ret i32 %add13
-; CHECK: std 9, 96(1)
-; CHECK: std 8, 88(1)
-; CHECK: std 7, 80(1)
-; CHECK: stw 6, 76(1)
-; CHECK: stw 5, 68(1)
-; CHECK: sth 4, 62(1)
-; CHECK: stb 3, 55(1)
-; CHECK: lha {{[0-9]+}}, 62(1)
-; CHECK: lha {{[0-9]+}}, 68(1)
-; CHECK: lbz {{[0-9]+}}, 55(1)
-; CHECK: lwz {{[0-9]+}}, 76(1)
-; CHECK: lwz {{[0-9]+}}, 80(1)
-; CHECK: lwz {{[0-9]+}}, 88(1)
-; CHECK: lwz {{[0-9]+}}, 96(1)
+; CHECK-LABEL: callee1
+; CHECK-DAG: std 9, 96(1)
+; CHECK-DAG: std 8, 88(1)
+; CHECK-DAG: std 7, 80(1)
+; CHECK-DAG: stw 6, 76(1)
+; CHECK-DAG: stw 5, 68(1)
+; CHECK-DAG: sth 4, 62(1)
+; CHECK-DAG: stb 3, 55(1)
+; CHECK-DAG: lha {{[0-9]+}}, 62(1)
+; CHECK-DAG: lha {{[0-9]+}}, 68(1)
+; CHECK-DAG: lbz {{[0-9]+}}, 55(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 76(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 80(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 88(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 96(1)
}
define i32 @caller2() nounwind {
@@ -139,6 +141,7 @@ entry:
%call = call i32 @callee2(%struct.t1* byval %p1, %struct.t2* byval %p2, %struct.t3* byval %p3, %struct.t4* byval %p4, %struct.t5* byval %p5, %struct.t6* byval %p6, %struct.t7* byval %p7)
ret i32 %call
+; CHECK-LABEL: caller2
; CHECK: stb {{[0-9]+}}, 71(1)
; CHECK: sth {{[0-9]+}}, 69(1)
; CHECK: stb {{[0-9]+}}, 87(1)
@@ -184,18 +187,19 @@ entry:
%add13 = add nsw i32 %add11, %6
ret i32 %add13
-; CHECK: std 9, 96(1)
-; CHECK: std 8, 88(1)
-; CHECK: std 7, 80(1)
-; CHECK: stw 6, 76(1)
-; CHECK: std 5, 64(1)
-; CHECK: sth 4, 62(1)
-; CHECK: stb 3, 55(1)
-; CHECK: lha {{[0-9]+}}, 62(1)
-; CHECK: lha {{[0-9]+}}, 69(1)
-; CHECK: lbz {{[0-9]+}}, 55(1)
-; CHECK: lwz {{[0-9]+}}, 76(1)
-; CHECK: lwz {{[0-9]+}}, 83(1)
-; CHECK: lwz {{[0-9]+}}, 90(1)
-; CHECK: lwz {{[0-9]+}}, 97(1)
+; CHECK-LABEL: callee2
+; CHECK-DAG: std 9, 96(1)
+; CHECK-DAG: std 8, 88(1)
+; CHECK-DAG: std 7, 80(1)
+; CHECK-DAG: stw 6, 76(1)
+; CHECK-DAG: std 5, 64(1)
+; CHECK-DAG: sth 4, 62(1)
+; CHECK-DAG: stb 3, 55(1)
+; CHECK-DAG: lha {{[0-9]+}}, 62(1)
+; CHECK-DAG: lha {{[0-9]+}}, 69(1)
+; CHECK-DAG: lbz {{[0-9]+}}, 55(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 76(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 83(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 90(1)
+; CHECK-DAG: lwz {{[0-9]+}}, 97(1)
}
diff --git a/test/CodeGen/PowerPC/subreg-postra-2.ll b/test/CodeGen/PowerPC/subreg-postra-2.ll
index fb33b9e35425..338000cd8bae 100644
--- a/test/CodeGen/PowerPC/subreg-postra-2.ll
+++ b/test/CodeGen/PowerPC/subreg-postra-2.ll
@@ -1,4 +1,5 @@
; RUN: llc -verify-machineinstrs -mcpu=pwr7 < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-gen-isel=false < %s | FileCheck --check-prefix=CHECK-NO-ISEL %s
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
@@ -31,10 +32,16 @@ while.end418: ; preds = %wait_on_buffer.exit
br i1 %tobool419, label %if.end421, label %if.then420
; CHECK-LABEL: @jbd2_journal_commit_transaction
+; CHECK-NO-ISEL-LABEL: @jbd2_journal_commit_transaction
; CHECK: andi.
; CHECK: crmove [[REG:[0-9]+]], 1
; CHECK: stdcx.
; CHECK: isel {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}, [[REG]]
+; CHECK-NO-ISEL: bc 12, 20, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 4, 7, 0
+; CHECK-NO-ISEL-NEXT: b [[SUCCESSOR:.LBB[0-9]+]]
+; CHECK-NO-ISEL: [[TRUE]]
+; CHECK-NO-ISEL-NEXT: addi 4, 3, 0
if.then420: ; preds = %while.end418
unreachable
diff --git a/test/CodeGen/PowerPC/subreg-postra.ll b/test/CodeGen/PowerPC/subreg-postra.ll
index 877ceccd918a..7557e4e9a467 100644
--- a/test/CodeGen/PowerPC/subreg-postra.ll
+++ b/test/CodeGen/PowerPC/subreg-postra.ll
@@ -1,4 +1,5 @@
; RUN: llc -verify-machineinstrs -mcpu=pwr7 < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mcpu=pwr7 -ppc-gen-isel=false < %s | FileCheck --check-prefix=CHECK-NO-ISEL %s
target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
@@ -145,10 +146,15 @@ wait_on_buffer.exit1319: ; preds = %while.body392
br i1 %inp8, label %while.end418, label %while.body392
; CHECK-LABEL: @jbd2_journal_commit_transaction
+; CHECK-NO-ISEL-LABEL: @jbd2_journal_commit_transaction
; CHECK: andi.
; CHECK: crmove
; CHECK: stdcx.
; CHECK: isel {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}},
+; CHECK-NO-ISEL: bc 12, 1, [[TRUE:.LBB[0-9]+]]
+; CHECK-NO-ISEL: ori 30, 3, 0
+; CHECK-NO-ISEL: b [[SUCCESSOR:.LBB[0-9]+]]
+
while.end418: ; preds = %wait_on_buffer.exit1319, %do.body378
%err.4.lcssa = phi i32 [ %inp2, %do.body378 ], [ %.err.4, %wait_on_buffer.exit1319 ]
diff --git a/test/CodeGen/PowerPC/subtract_from_imm.ll b/test/CodeGen/PowerPC/subtract_from_imm.ll
new file mode 100644
index 000000000000..8fa07b671a3d
--- /dev/null
+++ b/test/CodeGen/PowerPC/subtract_from_imm.ll
@@ -0,0 +1,41 @@
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-linux-gnu < %s | FileCheck %s
+
+; Make sure that the subfic is generated iff possible
+
+define i64 @subtract_from_imm1(i64 %v) nounwind readnone {
+entry:
+; CHECK-LABEL: subtract_from_imm1
+; CHECK: subfic 3, 3, 32767
+; CHECK: blr
+ %sub = sub i64 32767, %v
+ ret i64 %sub
+}
+
+define i64 @subtract_from_imm2(i64 %v) nounwind readnone {
+entry:
+; CHECK-LABEL: subtract_from_imm2
+; CHECK-NOT: subfic
+; CHECK: blr
+ %sub = sub i64 32768, %v
+ ret i64 %sub
+}
+
+define i64 @subtract_from_imm3(i64 %v) nounwind readnone {
+entry:
+; CHECK-LABEL: subtract_from_imm3
+; CHECK: subfic 3, 3, -32768
+; CHECK: blr
+ %sub = sub i64 -32768, %v
+ ret i64 %sub
+}
+
+define i64 @subtract_from_imm4(i64 %v) nounwind readnone {
+entry:
+; CHECK-LABEL: subtract_from_imm4
+; CHECK-NOT: subfic
+; CHECK: blr
+ %sub = sub i64 -32769, %v
+ ret i64 %sub
+}
+
diff --git a/test/CodeGen/PowerPC/swaps-le-4.ll b/test/CodeGen/PowerPC/swaps-le-4.ll
index 87c6dac9630b..2bf684d9d614 100644
--- a/test/CodeGen/PowerPC/swaps-le-4.ll
+++ b/test/CodeGen/PowerPC/swaps-le-4.ll
@@ -8,11 +8,11 @@ define void @bar() {
entry:
%x = alloca <2 x i64>, align 16
%0 = bitcast <2 x i64>* %x to i8*
- call void @llvm.lifetime.start(i64 16, i8* %0)
+ call void @llvm.lifetime.start.p0i8(i64 16, i8* %0)
%arrayidx = getelementptr inbounds <2 x i64>, <2 x i64>* %x, i64 0, i64 0
store <2 x i64> <i64 0, i64 1>, <2 x i64>* %x, align 16
call void @foo(i64* %arrayidx)
- call void @llvm.lifetime.end(i64 16, i8* %0)
+ call void @llvm.lifetime.end.p0i8(i64 16, i8* %0)
ret void
}
@@ -21,7 +21,7 @@ entry:
; CHECK: stxvd2x
; CHECK-NOT: xxswapd
-declare void @llvm.lifetime.start(i64, i8* nocapture)
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
declare void @foo(i64*)
-declare void @llvm.lifetime.end(i64, i8* nocapture)
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
diff --git a/test/CodeGen/PowerPC/swaps-le-7.ll b/test/CodeGen/PowerPC/swaps-le-7.ll
index dc3c49730700..1d5f50da398e 100644
--- a/test/CodeGen/PowerPC/swaps-le-7.ll
+++ b/test/CodeGen/PowerPC/swaps-le-7.ll
@@ -11,11 +11,11 @@
; CHECK-LABEL: @zg
; CHECK: xxspltd
; CHECK-NEXT: xxspltd
-; CHECK-NEXT: xxswapd
; CHECK-NEXT: xvmuldp
; CHECK-NEXT: xvmuldp
; CHECK-NEXT: xvsubdp
; CHECK-NEXT: xvadddp
+; CHECK-NEXT: xxswapd
; CHECK-NEXT: xxpermdi
; CHECK-NEXT: xvsubdp
; CHECK-NEXT: xxswapd
@@ -52,4 +52,4 @@ L.JA291:
ret void
}
-attributes #0 = { noinline } \ No newline at end of file
+attributes #0 = { noinline }
diff --git a/test/CodeGen/PowerPC/tail-dup-branch-to-fallthrough.ll b/test/CodeGen/PowerPC/tail-dup-branch-to-fallthrough.ll
index 5d03af801fc6..0b1014571613 100644
--- a/test/CodeGen/PowerPC/tail-dup-branch-to-fallthrough.ll
+++ b/test/CodeGen/PowerPC/tail-dup-branch-to-fallthrough.ll
@@ -3,7 +3,7 @@ target datalayout = "E-m:e-i64:64-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
; Function Attrs: nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture) #0
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #0
declare void @f1()
declare void @f2()
@@ -54,11 +54,11 @@ if.else: ; preds = %sw.default
br label %dup2
dup1: ; preds = %sw.0, %sw.1
- call void @llvm.lifetime.end(i64 8, i8* nonnull undef) #0
+ call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull undef) #0
unreachable
dup2: ; preds = %if.then, %if.else
- call void @llvm.lifetime.end(i64 8, i8* nonnull undef) #0
+ call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull undef) #0
unreachable
}
diff --git a/test/CodeGen/PowerPC/tail-dup-break-cfg.ll b/test/CodeGen/PowerPC/tail-dup-break-cfg.ll
new file mode 100644
index 000000000000..f19b11f2ae4c
--- /dev/null
+++ b/test/CodeGen/PowerPC/tail-dup-break-cfg.ll
@@ -0,0 +1,140 @@
+; RUN: llc -O2 -o - %s | FileCheck %s
+target datalayout = "e-m:e-i64:64-n32:64"
+target triple = "powerpc64le-grtev4-linux-gnu"
+
+; Intended layout:
+; The code for tail-duplication during layout will produce the layout:
+; test1
+; test2
+; body1 (with copy of test2)
+; body2
+; exit
+
+;CHECK-LABEL: tail_dup_break_cfg:
+;CHECK: mr [[TAGREG:[0-9]+]], 3
+;CHECK: andi. {{[0-9]+}}, [[TAGREG]], 1
+;CHECK-NEXT: bc 12, 1, [[BODY1LABEL:[._0-9A-Za-z]+]]
+;CHECK-NEXT: # %test2
+;CHECK-NEXT: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 30, 30
+;CHECK-NEXT: bne 0, [[BODY2LABEL:[._0-9A-Za-z]+]]
+;CHECK: [[EXITLABEL:[._0-9A-Za-z]+]]: # %exit
+;CHECK: blr
+;CHECK-NEXT: [[BODY1LABEL]]
+;CHECK: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 30, 30
+;CHECK-NEXT: beq 0, [[EXITLABEL]]
+;CHECK-NEXT: [[BODY2LABEL:[._0-9A-Za-z]+]]:
+;CHECK: b [[EXITLABEL]]
+define void @tail_dup_break_cfg(i32 %tag) {
+entry:
+ br label %test1
+test1:
+ %tagbit1 = and i32 %tag, 1
+ %tagbit1eq0 = icmp eq i32 %tagbit1, 0
+ br i1 %tagbit1eq0, label %test2, label %body1, !prof !1 ; %test2 more likely
+body1:
+ call void @a()
+ call void @a()
+ call void @a()
+ call void @a()
+ br label %test2
+test2:
+ %tagbit2 = and i32 %tag, 2
+ %tagbit2eq0 = icmp eq i32 %tagbit2, 0
+ br i1 %tagbit2eq0, label %exit, label %body2, !prof !1 ; %exit more likely
+body2:
+ call void @b()
+ call void @b()
+ call void @b()
+ call void @b()
+ br label %exit
+exit:
+ ret void
+}
+
+; The branch weights here hint that we shouldn't tail duplicate in this case.
+;CHECK-LABEL: tail_dup_dont_break_cfg:
+;CHECK: mr [[TAGREG:[0-9]+]], 3
+;CHECK: andi. {{[0-9]+}}, [[TAGREG]], 1
+;CHECK-NEXT: bc 4, 1, [[TEST2LABEL:[._0-9A-Za-z]+]]
+;CHECK-NEXT: # %body1
+;CHECK: [[TEST2LABEL]]: # %test2
+;CHECK-NEXT: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 30, 30
+;CHECK-NEXT: beq 0, [[EXITLABEL:[._0-9A-Za-z]+]]
+;CHECK-NEXT: # %body2
+;CHECK: [[EXITLABEL:[._0-9A-Za-z]+]]: # %exit
+;CHECK: blr
+define void @tail_dup_dont_break_cfg(i32 %tag) {
+entry:
+ br label %test1
+test1:
+ %tagbit1 = and i32 %tag, 1
+ %tagbit1eq0 = icmp eq i32 %tagbit1, 0
+ br i1 %tagbit1eq0, label %test2, label %body1, !prof !1 ; %test2 more likely
+body1:
+ call void @a()
+ call void @a()
+ call void @a()
+ call void @a()
+ br label %test2
+test2:
+ %tagbit2 = and i32 %tag, 2
+ %tagbit2eq0 = icmp ne i32 %tagbit2, 0
+ br i1 %tagbit2eq0, label %body2, label %exit, !prof !3 ; %body2 more likely
+body2:
+ call void @b()
+ call void @b()
+ call void @b()
+ call void @b()
+ br label %exit
+exit:
+ ret void
+}
+declare void @a()
+declare void @b()
+declare void @c()
+declare void @d()
+
+; This function arranges for the successors of %succ to have already been laid
+; out. When we consider whether to lay out succ after bb and to tail-duplicate
+; it, v and ret have already been placed, so we tail-duplicate as it removes a
+; branch and strictly increases fallthrough
+; CHECK-LABEL: tail_dup_no_succ
+; CHECK: # %entry
+; CHECK: # %v
+; CHECK: # %ret
+; CHECK: # %bb
+; CHECK: # %succ
+; CHECK: # %c
+; CHECK: bl c
+; CHECK: rlwinm. {{[0-9]+}}, {{[0-9]+}}, 0, 29, 29
+; CHECK: beq
+; CHECK: b
+define void @tail_dup_no_succ(i32 %tag) {
+entry:
+ %tagbit1 = and i32 %tag, 1
+ %tagbit1eq0 = icmp eq i32 %tagbit1, 0
+ br i1 %tagbit1eq0, label %v, label %bb, !prof !2 ; %v very much more likely
+bb:
+ %tagbit2 = and i32 %tag, 2
+ %tagbit2eq0 = icmp eq i32 %tagbit2, 0
+ br i1 %tagbit2eq0, label %succ, label %c, !prof !3 ; %succ more likely
+c:
+ call void @c()
+ call void @c()
+ br label %succ
+succ:
+ %tagbit3 = and i32 %tag, 4
+ %tagbit3eq0 = icmp eq i32 %tagbit3, 0
+  br i1 %tagbit3eq0, label %ret, label %v, !prof !1 ; %ret more likely
+v:
+ call void @d()
+ call void @d()
+ br label %ret
+ret:
+ ret void
+}
+
+
+!1 = !{!"branch_weights", i32 5, i32 3}
+!2 = !{!"branch_weights", i32 95, i32 5}
+!3 = !{!"branch_weights", i32 8, i32 3}
diff --git a/test/CodeGen/PowerPC/tail-dup-layout.ll b/test/CodeGen/PowerPC/tail-dup-layout.ll
index 6790aa8e9441..c9b5bf8c9eeb 100644
--- a/test/CodeGen/PowerPC/tail-dup-layout.ll
+++ b/test/CodeGen/PowerPC/tail-dup-layout.ll
@@ -1,59 +1,59 @@
-; RUN: llc -outline-optional-branches -O2 < %s | FileCheck %s
+; RUN: llc -O2 < %s | FileCheck %s
target datalayout = "e-m:e-i64:64-n32:64"
target triple = "powerpc64le-grtev4-linux-gnu"
; Intended layout:
-; The outlining flag produces the layout
+; The chain-based outlining produces the layout
; test1
; test2
; test3
; test4
-; exit
; optional1
; optional2
; optional3
; optional4
+; exit
; Tail duplication puts test n+1 at the end of optional n
; so optional1 includes a copy of test2 at the end, and branches
; to test3 (at the top) or falls through to optional 2.
-; The CHECK statements check for the whole string of tests and exit block,
+; The CHECK statements check for the whole string of tests
; and then check that the correct test has been duplicated into the end of
; the optional blocks and that the optional blocks are in the correct order.
-;CHECK-LABEL: f:
+;CHECK-LABEL: straight_test:
; test1 may have been merged with entry
;CHECK: mr [[TAGREG:[0-9]+]], 3
;CHECK: andi. {{[0-9]+}}, [[TAGREG]], 1
-;CHECK-NEXT: bc 12, 1, [[OPT1LABEL:[._0-9A-Za-z]+]]
-;CHECK-NEXT: [[TEST2LABEL:[._0-9A-Za-z]+]]: # %test2
+;CHECK-NEXT: bc 12, 1, .[[OPT1LABEL:[_0-9A-Za-z]+]]
+;CHECK-NEXT: # %test2
;CHECK-NEXT: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 30, 30
-;CHECK-NEXT: bne 0, [[OPT2LABEL:[._0-9A-Za-z]+]]
-;CHECK-NEXT: [[TEST3LABEL:[._0-9A-Za-z]+]]: # %test3
+;CHECK-NEXT: bne 0, .[[OPT2LABEL:[_0-9A-Za-z]+]]
+;CHECK-NEXT: .[[TEST3LABEL:[_0-9A-Za-z]+]]: # %test3
;CHECK-NEXT: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 29, 29
-;CHECK-NEXT: bne 0, .[[OPT3LABEL:[._0-9A-Za-z]+]]
-;CHECK-NEXT: [[TEST4LABEL:[._0-9A-Za-z]+]]: # %test4
+;CHECK-NEXT: bne 0, .[[OPT3LABEL:[_0-9A-Za-z]+]]
+;CHECK-NEXT: .[[TEST4LABEL:[_0-9A-Za-z]+]]: # %test4
;CHECK-NEXT: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 28, 28
-;CHECK-NEXT: bne 0, .[[OPT4LABEL:[._0-9A-Za-z]+]]
-;CHECK-NEXT: [[EXITLABEL:[._0-9A-Za-z]+]]: # %exit
+;CHECK-NEXT: bne 0, .[[OPT4LABEL:[_0-9A-Za-z]+]]
+;CHECK-NEXT: .[[EXITLABEL:[_0-9A-Za-z]+]]: # %exit
;CHECK: blr
-;CHECK-NEXT: [[OPT1LABEL]]
+;CHECK-NEXT: .[[OPT1LABEL]]:
;CHECK: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 30, 30
-;CHECK-NEXT: beq 0, [[TEST3LABEL]]
-;CHECK-NEXT: [[OPT2LABEL]]
+;CHECK-NEXT: beq 0, .[[TEST3LABEL]]
+;CHECK-NEXT: .[[OPT2LABEL]]:
;CHECK: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 29, 29
-;CHECK-NEXT: beq 0, [[TEST4LABEL]]
-;CHECK-NEXT: [[OPT3LABEL]]
+;CHECK-NEXT: beq 0, .[[TEST4LABEL]]
+;CHECK-NEXT: .[[OPT3LABEL]]:
;CHECK: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 28, 28
-;CHECK-NEXT: beq 0, [[EXITLABEL]]
-;CHECK-NEXT: [[OPT4LABEL]]
-;CHECK: b [[EXITLABEL]]
+;CHECK-NEXT: beq 0, .[[EXITLABEL]]
+;CHECK-NEXT: .[[OPT4LABEL]]:
+;CHECK: b .[[EXITLABEL]]
-define void @f(i32 %tag) {
+define void @straight_test(i32 %tag) {
entry:
br label %test1
test1:
%tagbit1 = and i32 %tag, 1
%tagbit1eq0 = icmp eq i32 %tagbit1, 0
- br i1 %tagbit1eq0, label %test2, label %optional1
+ br i1 %tagbit1eq0, label %test2, label %optional1, !prof !1
optional1:
call void @a()
call void @a()
@@ -63,7 +63,7 @@ optional1:
test2:
%tagbit2 = and i32 %tag, 2
%tagbit2eq0 = icmp eq i32 %tagbit2, 0
- br i1 %tagbit2eq0, label %test3, label %optional2
+ br i1 %tagbit2eq0, label %test3, label %optional2, !prof !1
optional2:
call void @b()
call void @b()
@@ -73,7 +73,7 @@ optional2:
test3:
%tagbit3 = and i32 %tag, 4
%tagbit3eq0 = icmp eq i32 %tagbit3, 0
- br i1 %tagbit3eq0, label %test4, label %optional3
+ br i1 %tagbit3eq0, label %test4, label %optional3, !prof !1
optional3:
call void @c()
call void @c()
@@ -83,7 +83,7 @@ optional3:
test4:
%tagbit4 = and i32 %tag, 8
%tagbit4eq0 = icmp eq i32 %tagbit4, 0
- br i1 %tagbit4eq0, label %exit, label %optional4
+ br i1 %tagbit4eq0, label %exit, label %optional4, !prof !1
optional4:
call void @d()
call void @d()
@@ -94,7 +94,449 @@ exit:
ret void
}
+; Intended layout:
+; The chain-of-triangles based duplicating produces the layout
+; test1
+; test2
+; test3
+; test4
+; optional1
+; optional2
+; optional3
+; optional4
+; exit
+; even for 50/50 branches.
+; Tail duplication puts test n+1 at the end of optional n
+; so optional1 includes a copy of test2 at the end, and branches
+; to test3 (at the top) or falls through to optional 2.
+; The CHECK statements check for the whole string of tests
+; and then check that the correct test has been duplicated into the end of
+; the optional blocks and that the optional blocks are in the correct order.
+;CHECK-LABEL: straight_test_50:
+; test1 may have been merged with entry
+;CHECK: mr [[TAGREG:[0-9]+]], 3
+;CHECK: andi. {{[0-9]+}}, [[TAGREG]], 1
+;CHECK-NEXT: bc 12, 1, .[[OPT1LABEL:[_0-9A-Za-z]+]]
+;CHECK-NEXT: # %test2
+;CHECK-NEXT: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 30, 30
+;CHECK-NEXT: bne 0, .[[OPT2LABEL:[_0-9A-Za-z]+]]
+;CHECK-NEXT: .[[TEST3LABEL:[_0-9A-Za-z]+]]: # %test3
+;CHECK-NEXT: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 29, 29
+;CHECK-NEXT: bne 0, .[[OPT3LABEL:[_0-9A-Za-z]+]]
+;CHECK-NEXT: .[[EXITLABEL:[_0-9A-Za-z]+]]: # %exit
+;CHECK: blr
+;CHECK-NEXT: .[[OPT1LABEL]]:
+;CHECK: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 30, 30
+;CHECK-NEXT: beq 0, .[[TEST3LABEL]]
+;CHECK-NEXT: .[[OPT2LABEL]]:
+;CHECK: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 29, 29
+;CHECK-NEXT: beq 0, .[[EXITLABEL]]
+;CHECK-NEXT: .[[OPT3LABEL]]:
+;CHECK: b .[[EXITLABEL]]
+
+define void @straight_test_50(i32 %tag) {
+entry:
+ br label %test1
+test1:
+ %tagbit1 = and i32 %tag, 1
+ %tagbit1eq0 = icmp eq i32 %tagbit1, 0
+ br i1 %tagbit1eq0, label %test2, label %optional1, !prof !2
+optional1:
+ call void @a()
+ br label %test2
+test2:
+ %tagbit2 = and i32 %tag, 2
+ %tagbit2eq0 = icmp eq i32 %tagbit2, 0
+ br i1 %tagbit2eq0, label %test3, label %optional2, !prof !2
+optional2:
+ call void @b()
+ br label %test3
+test3:
+ %tagbit3 = and i32 %tag, 4
+ %tagbit3eq0 = icmp eq i32 %tagbit3, 0
+ br i1 %tagbit3eq0, label %exit, label %optional3, !prof !1
+optional3:
+ call void @c()
+ br label %exit
+exit:
+ ret void
+}
+
+; Intended layout:
+; The chain-based outlining produces the layout
+; entry
+; --- Begin loop ---
+; for.latch
+; for.check
+; test1
+; test2
+; test3
+; test4
+; optional1
+; optional2
+; optional3
+; optional4
+; --- End loop ---
+; exit
+; The CHECK statements check for the whole string of tests and exit block,
+; and then check that the correct test has been duplicated into the end of
+; the optional blocks and that the optional blocks are in the correct order.
+;CHECK-LABEL: loop_test:
+;CHECK: add [[TAGPTRREG:[0-9]+]], 3, 4
+;CHECK: .[[LATCHLABEL:[._0-9A-Za-z]+]]: # %for.latch
+;CHECK: addi
+;CHECK: .[[CHECKLABEL:[._0-9A-Za-z]+]]: # %for.check
+;CHECK: lwz [[TAGREG:[0-9]+]], 0([[TAGPTRREG]])
+;CHECK: # %test1
+;CHECK: andi. {{[0-9]+}}, [[TAGREG]], 1
+;CHECK-NEXT: bc 12, 1, .[[OPT1LABEL:[._0-9A-Za-z]+]]
+;CHECK-NEXT: # %test2
+;CHECK: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 30, 30
+;CHECK-NEXT: bne 0, .[[OPT2LABEL:[._0-9A-Za-z]+]]
+;CHECK-NEXT: .[[TEST3LABEL:[._0-9A-Za-z]+]]: # %test3
+;CHECK: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 29, 29
+;CHECK-NEXT: bne 0, .[[OPT3LABEL:[._0-9A-Za-z]+]]
+;CHECK-NEXT: .[[TEST4LABEL:[._0-9A-Za-z]+]]: # %{{(test4|optional3)}}
+;CHECK: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 28, 28
+;CHECK-NEXT: beq 0, .[[LATCHLABEL]]
+;CHECK-NEXT: b .[[OPT4LABEL:[._0-9A-Za-z]+]]
+;CHECK: [[OPT1LABEL]]
+;CHECK: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 30, 30
+;CHECK-NEXT: beq 0, .[[TEST3LABEL]]
+;CHECK-NEXT: .[[OPT2LABEL]]
+;CHECK: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 29, 29
+;CHECK-NEXT: beq 0, .[[TEST4LABEL]]
+;CHECK-NEXT: .[[OPT3LABEL]]
+;CHECK: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 28, 28
+;CHECK-NEXT: beq 0, .[[LATCHLABEL]]
+;CHECK: [[OPT4LABEL]]:
+;CHECK: b .[[LATCHLABEL]]
+define void @loop_test(i32* %tags, i32 %count) {
+entry:
+ br label %for.check
+for.check:
+ %count.loop = phi i32 [%count, %entry], [%count.sub, %for.latch]
+ %done.count = icmp ugt i32 %count.loop, 0
+ %tag_ptr = getelementptr inbounds i32, i32* %tags, i32 %count
+ %tag = load i32, i32* %tag_ptr
+ %done.tag = icmp eq i32 %tag, 0
+ %done = and i1 %done.count, %done.tag
+ br i1 %done, label %test1, label %exit, !prof !1
+test1:
+ %tagbit1 = and i32 %tag, 1
+ %tagbit1eq0 = icmp eq i32 %tagbit1, 0
+ br i1 %tagbit1eq0, label %test2, label %optional1, !prof !1
+optional1:
+ call void @a()
+ call void @a()
+ call void @a()
+ call void @a()
+ br label %test2
+test2:
+ %tagbit2 = and i32 %tag, 2
+ %tagbit2eq0 = icmp eq i32 %tagbit2, 0
+ br i1 %tagbit2eq0, label %test3, label %optional2, !prof !1
+optional2:
+ call void @b()
+ call void @b()
+ call void @b()
+ call void @b()
+ br label %test3
+test3:
+ %tagbit3 = and i32 %tag, 4
+ %tagbit3eq0 = icmp eq i32 %tagbit3, 0
+ br i1 %tagbit3eq0, label %test4, label %optional3, !prof !1
+optional3:
+ call void @c()
+ call void @c()
+ call void @c()
+ call void @c()
+ br label %test4
+test4:
+ %tagbit4 = and i32 %tag, 8
+ %tagbit4eq0 = icmp eq i32 %tagbit4, 0
+ br i1 %tagbit4eq0, label %for.latch, label %optional4, !prof !1
+optional4:
+ call void @d()
+ call void @d()
+ call void @d()
+ call void @d()
+ br label %for.latch
+for.latch:
+ %count.sub = sub i32 %count.loop, 1
+ br label %for.check
+exit:
+ ret void
+}
+
+; The block then2 is not unavoidable, meaning it does not dominate the exit.
+; But since it can be tail-duplicated, it should be placed as a fallthrough from
+; test2 and copied. The purpose here is to make sure that the tail-duplication
+; code is independent of the outlining code, which works by choosing the
+; "unavoidable" blocks.
+; CHECK-LABEL: avoidable_test:
+; CHECK: # %entry
+; CHECK: andi.
+; CHECK: # %test2
+; Make sure then2 falls through from test2
+; CHECK-NOT: # %{{[-_a-zA-Z0-9]+}}
+; CHECK: # %then2
+; CHECK: rlwinm. {{[0-9]+}}, {{[0-9]+}}, 0, 29, 29
+; CHECK: # %else1
+; CHECK: bl a
+; CHECK: bl a
+; Make sure then2 was copied into else1
+; CHECK: rlwinm. {{[0-9]+}}, {{[0-9]+}}, 0, 29, 29
+; CHECK: # %end1
+; CHECK: bl d
+; CHECK: # %else2
+; CHECK: bl c
+; CHECK: # %end2
+define void @avoidable_test(i32 %tag) {
+entry:
+ br label %test1
+test1:
+ %tagbit1 = and i32 %tag, 1
+ %tagbit1eq0 = icmp eq i32 %tagbit1, 0
+ br i1 %tagbit1eq0, label %test2, label %else1, !prof !1 ; %test2 more likely
+else1:
+ call void @a()
+ call void @a()
+ br label %then2
+test2:
+ %tagbit2 = and i32 %tag, 2
+ %tagbit2eq0 = icmp eq i32 %tagbit2, 0
+ br i1 %tagbit2eq0, label %then2, label %else2, !prof !1 ; %then2 more likely
+then2:
+ %tagbit3 = and i32 %tag, 4
+ %tagbit3eq0 = icmp eq i32 %tagbit3, 0
+ br i1 %tagbit3eq0, label %end2, label %end1, !prof !1 ; %end2 more likely
+else2:
+ call void @c()
+ br label %end2
+end2:
+ ret void
+end1:
+ call void @d()
+ ret void
+}
+
+; CHECK-LABEL: trellis_test
+; The number in the block labels is the expected block frequency given the
+; probabilities annotated. There is a conflict in the b;c->d;e trellis that
+; should be resolved as c->e;b->d.
+; The d;e->f;g trellis should be resolved as e->g;d->f.
+; The f;g->h;i trellis should be resolved as f->i;g->h.
+; The h;i->j;ret trellis contains a triangle edge, and should be resolved as
+; h->j->ret
+; CHECK: # %entry
+; CHECK: # %c10
+; CHECK: # %e9
+; CHECK: # %g10
+; CHECK: # %h10
+; CHECK: # %j8
+; CHECK: # %ret
+; CHECK: # %b6
+; CHECK: # %d7
+; CHECK: # %f6
+; CHECK: # %i6
+define void @trellis_test(i32 %tag) {
+entry:
+ br label %a16
+a16:
+ call void @a()
+ call void @a()
+ %tagbits.a = and i32 %tag, 3
+ %tagbits.a.eq0 = icmp eq i32 %tagbits.a, 0
+ br i1 %tagbits.a.eq0, label %c10, label %b6, !prof !1 ; 10 to 6
+c10:
+ call void @c()
+ call void @c()
+ %tagbits.c = and i32 %tag, 12
+ %tagbits.c.eq0 = icmp eq i32 %tagbits.c, 0
+ ; Both of these edges should be hotter than the other incoming edge
+ ; for e9 or d7
+ br i1 %tagbits.c.eq0, label %e9, label %d7, !prof !3 ; 6 to 4
+e9:
+ call void @e()
+ call void @e()
+ %tagbits.e = and i32 %tag, 48
+ %tagbits.e.eq0 = icmp eq i32 %tagbits.e, 0
+ br i1 %tagbits.e.eq0, label %g10, label %f6, !prof !4 ; 7 to 2
+g10:
+ call void @g()
+ call void @g()
+ %tagbits.g = and i32 %tag, 192
+ %tagbits.g.eq0 = icmp eq i32 %tagbits.g, 0
+ br i1 %tagbits.g.eq0, label %i6, label %h10, !prof !5 ; 2 to 8
+i6:
+ call void @i()
+ call void @i()
+ %tagbits.i = and i32 %tag, 768
+ %tagbits.i.eq0 = icmp eq i32 %tagbits.i, 0
+ br i1 %tagbits.i.eq0, label %ret, label %j8, !prof !2 ; balanced (3 to 3)
+b6:
+ call void @b()
+ call void @b()
+ %tagbits.b = and i32 %tag, 12
+ %tagbits.b.eq1 = icmp eq i32 %tagbits.b, 8
+ br i1 %tagbits.b.eq1, label %e9, label %d7, !prof !2 ; balanced (3 to 3)
+d7:
+ call void @d()
+ call void @d()
+ %tagbits.d = and i32 %tag, 48
+ %tagbits.d.eq1 = icmp eq i32 %tagbits.d, 32
+ br i1 %tagbits.d.eq1, label %g10, label %f6, !prof !6 ; 3 to 4
+f6:
+ call void @f()
+ call void @f()
+ %tagbits.f = and i32 %tag, 192
+ %tagbits.f.eq1 = icmp eq i32 %tagbits.f, 128
+ br i1 %tagbits.f.eq1, label %i6, label %h10, !prof !7 ; 4 to 2
+h10:
+ call void @h()
+ call void @h()
+ %tagbits.h = and i32 %tag, 768
+ %tagbits.h.eq1 = icmp eq i32 %tagbits.h, 512
+ br i1 %tagbits.h.eq1, label %ret, label %j8, !prof !2 ; balanced (5 to 5)
+j8:
+ call void @j()
+ call void @j()
+ br label %ret
+ret:
+ ret void
+}
+
+; Verify that we still consider tail-duplication opportunities if we find a
+; triangle trellis. Here D->F->G is the triangle, and D;E are both predecessors
+; of both F and G. The basic trellis algorithm picks the F->G edge, but after
+; checking, it's profitable to duplicate G into F. The weights here are not
+; really important. They are there to help make the test stable.
+; CHECK-LABEL: trellis_then_dup_test
+; CHECK: # %entry
+; CHECK: # %b
+; CHECK: # %d
+; CHECK: # %g
+; CHECK: # %ret1
+; CHECK: # %c
+; CHECK: # %e
+; CHECK: # %f
+; CHECK: # %ret2
+; CHECK: # %ret
+define void @trellis_then_dup_test(i32 %tag) {
+entry:
+ br label %a
+a:
+ call void @a()
+ call void @a()
+ %tagbits.a = and i32 %tag, 3
+ %tagbits.a.eq0 = icmp eq i32 %tagbits.a, 0
+ br i1 %tagbits.a.eq0, label %b, label %c, !prof !1 ; 5 to 3
+b:
+ call void @b()
+ call void @b()
+ %tagbits.b = and i32 %tag, 12
+ %tagbits.b.eq1 = icmp eq i32 %tagbits.b, 8
+ br i1 %tagbits.b.eq1, label %d, label %e, !prof !1 ; 5 to 3
+d:
+ call void @d()
+ call void @d()
+ %tagbits.d = and i32 %tag, 48
+ %tagbits.d.eq1 = icmp eq i32 %tagbits.d, 32
+ br i1 %tagbits.d.eq1, label %g, label %f, !prof !1 ; 5 to 3
+f:
+ call void @f()
+ call void @f()
+ br label %g
+g:
+ %tagbits.g = and i32 %tag, 192
+ %tagbits.g.eq0 = icmp eq i32 %tagbits.g, 0
+ br i1 %tagbits.g.eq0, label %ret1, label %ret2, !prof !2 ; balanced
+c:
+ call void @c()
+ call void @c()
+ %tagbits.c = and i32 %tag, 12
+ %tagbits.c.eq0 = icmp eq i32 %tagbits.c, 0
+ br i1 %tagbits.c.eq0, label %d, label %e, !prof !1 ; 5 to 3
+e:
+ call void @e()
+ call void @e()
+ %tagbits.e = and i32 %tag, 48
+ %tagbits.e.eq0 = icmp eq i32 %tagbits.e, 0
+ br i1 %tagbits.e.eq0, label %g, label %f, !prof !1 ; 5 to 3
+ret1:
+ call void @a()
+ br label %ret
+ret2:
+ call void @b()
+ br label %ret
+ret:
+ ret void
+}
+
+; Verify that we do not mis-identify a trellis as a triangle when it is not
+; really a triangle.
+; CHECK-LABEL: trellis_no_triangle
+; CHECK: # %entry
+; CHECK: # %b
+; CHECK: # %d
+; CHECK: # %ret
+; CHECK: # %c
+; CHECK: # %e
+define void @trellis_no_triangle(i32 %tag) {
+entry:
+ br label %a
+a:
+ call void @a()
+ call void @a()
+ %tagbits.a = and i32 %tag, 3
+ %tagbits.a.eq0 = icmp eq i32 %tagbits.a, 0
+ br i1 %tagbits.a.eq0, label %b, label %c, !prof !8 ; 98 to 2
+b:
+ call void @b()
+ call void @b()
+ %tagbits.b = and i32 %tag, 12
+ %tagbits.b.eq1 = icmp eq i32 %tagbits.b, 8
+ br i1 %tagbits.b.eq1, label %d, label %e, !prof !9 ; 97 to 1
+d:
+ call void @d()
+ call void @d()
+ %tagbits.d = and i32 %tag, 48
+ %tagbits.d.eq1 = icmp eq i32 %tagbits.d, 32
+ br i1 %tagbits.d.eq1, label %ret, label %e, !prof !10 ; 96 to 2
+c:
+ call void @c()
+ call void @c()
+ %tagbits.c = and i32 %tag, 12
+ %tagbits.c.eq0 = icmp eq i32 %tagbits.c, 0
+ br i1 %tagbits.c.eq0, label %d, label %e, !prof !2 ; 1 to 1
+e:
+ call void @e()
+ call void @e()
+ br label %ret
+ret:
+ call void @f()
+ ret void
+}
+
declare void @a()
declare void @b()
declare void @c()
declare void @d()
+declare void @e()
+declare void @f()
+declare void @g()
+declare void @h()
+declare void @i()
+declare void @j()
+
+!1 = !{!"branch_weights", i32 5, i32 3}
+!2 = !{!"branch_weights", i32 50, i32 50}
+!3 = !{!"branch_weights", i32 6, i32 4}
+!4 = !{!"branch_weights", i32 7, i32 2}
+!5 = !{!"branch_weights", i32 2, i32 8}
+!6 = !{!"branch_weights", i32 3, i32 4}
+!7 = !{!"branch_weights", i32 4, i32 2}
+!8 = !{!"branch_weights", i32 98, i32 2}
+!9 = !{!"branch_weights", i32 97, i32 1}
+!10 = !{!"branch_weights", i32 96, i32 2}
diff --git a/test/CodeGen/PowerPC/toc-load-sched-bug.ll b/test/CodeGen/PowerPC/toc-load-sched-bug.ll
index e83124cbb990..21ccbf6f1ead 100644
--- a/test/CodeGen/PowerPC/toc-load-sched-bug.ll
+++ b/test/CodeGen/PowerPC/toc-load-sched-bug.ll
@@ -223,7 +223,7 @@ if.then: ; preds = %_ZNK4llvm7ErrorOrIS
%10 = getelementptr inbounds %"class.std::allocator", %"class.std::allocator"* %ref.tmp.i.i2.i, i64 0, i32 0
%11 = bitcast %"class.llvm::SMDiagnostic"* %ref.tmp to i8*
call void @llvm.memset.p0i8.i64(i8* %11, i8 0, i64 16, i32 8, i1 false) #3
- call void @llvm.lifetime.start(i64 1, i8* %10) #3
+ call void @llvm.lifetime.start.p0i8(i64 1, i8* %10) #3
%tobool.i.i4.i = icmp eq i8* %4, null
br i1 %tobool.i.i4.i, label %if.then.i.i6.i, label %if.end.i.i8.i
@@ -237,7 +237,7 @@ if.end.i.i8.i: ; preds = %if.then
br label %_ZNK4llvm9StringRefcvSsEv.exit9.i
_ZNK4llvm9StringRefcvSsEv.exit9.i: ; preds = %if.end.i.i8.i, %if.then.i.i6.i
- call void @llvm.lifetime.end(i64 1, i8* %10) #3
+ call void @llvm.lifetime.end.p0i8(i64 1, i8* %10) #3
%LineNo.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 3
store i32 -1, i32* %LineNo.i, align 8, !tbaa !14
%ColumnNo.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 4
@@ -246,7 +246,7 @@ _ZNK4llvm9StringRefcvSsEv.exit9.i: ; preds = %if.end.i.i8.i, %if.
store i32 0, i32* %Kind.i, align 8, !tbaa !22
%Message.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 6
%12 = getelementptr inbounds %"class.std::allocator", %"class.std::allocator"* %ref.tmp.i.i.i, i64 0, i32 0
- call void @llvm.lifetime.start(i64 1, i8* %12) #3
+ call void @llvm.lifetime.start.p0i8(i64 1, i8* %12) #3
%tobool.i.i.i = icmp eq i8* %8, null
br i1 %tobool.i.i.i, label %if.then.i.i.i, label %if.end.i.i.i
@@ -260,7 +260,7 @@ if.end.i.i.i: ; preds = %_ZNK4llvm9StringRef
br label %_ZN4llvm12SMDiagnosticC2ENS_9StringRefENS_9SourceMgr8DiagKindES1_.exit
_ZN4llvm12SMDiagnosticC2ENS_9StringRefENS_9SourceMgr8DiagKindES1_.exit: ; preds = %if.then.i.i.i, %if.end.i.i.i
- call void @llvm.lifetime.end(i64 1, i8* %12) #3
+ call void @llvm.lifetime.end.p0i8(i64 1, i8* %12) #3
%_M_p.i.i.i.i.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 7, i32 0, i32 0
store i8* bitcast (i64* getelementptr inbounds ([0 x i64], [0 x i64]* @_ZNSs4_Rep20_S_empty_rep_storageE, i64 0, i64 3) to i8*), i8** %_M_p.i.i.i.i.i, align 8, !tbaa !13
%Ranges.i = getelementptr inbounds %"class.llvm::SMDiagnostic", %"class.llvm::SMDiagnostic"* %ref.tmp, i64 0, i32 8
@@ -320,7 +320,7 @@ _ZN4llvm12SMDiagnosticaSEOS0_.exit: ; preds = %_ZN4llvm12SMDiagnos
%call2.i.i42 = call dereferenceable(48) %"class.llvm::SmallVectorImpl.85"* @_ZN4llvm15SmallVectorImplINS_7SMFixItEEaSEOS2_(%"class.llvm::SmallVectorImpl.85"* %24, %"class.llvm::SmallVectorImpl.85"* dereferenceable(48) %25) #3
call void @_ZN4llvm12SMDiagnosticD2Ev(%"class.llvm::SMDiagnostic"* %ref.tmp) #3
%26 = getelementptr inbounds %"class.std::allocator", %"class.std::allocator"* %ref.tmp.i.i, i64 0, i32 0
- call void @llvm.lifetime.start(i64 1, i8* %26) #3
+ call void @llvm.lifetime.start.p0i8(i64 1, i8* %26) #3
%27 = bitcast i8* %arrayidx.i.i.i36 to %"struct.std::basic_string<char, std::char_traits<char>, std::allocator<char> >::_Rep"*
%cmp.i.i.i = icmp eq i8* %arrayidx.i.i.i36, bitcast ([0 x i64]* @_ZNSs4_Rep20_S_empty_rep_storageE to i8*)
br i1 %cmp.i.i.i, label %_ZNSsD1Ev.exit, label %if.then.i.i.i45, !prof !28
@@ -332,11 +332,11 @@ if.then.i.i.i45: ; preds = %_ZN4llvm12SMDiagnos
if.then.i.i.i.i: ; preds = %if.then.i.i.i45
%.atomicdst.i.i.i.i.i.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast = bitcast i32* %.atomicdst.i.i.i.i.i to i8*
- call void @llvm.lifetime.start(i64 4, i8* %.atomicdst.i.i.i.i.i.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast)
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* %.atomicdst.i.i.i.i.i.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast)
%29 = atomicrmw volatile add i32* %28, i32 -1 acq_rel
store i32 %29, i32* %.atomicdst.i.i.i.i.i, align 4
%.atomicdst.i.i.i.i.i.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..atomicdst.0..atomicdst.0..i.i.i.i.i = load volatile i32, i32* %.atomicdst.i.i.i.i.i, align 4
- call void @llvm.lifetime.end(i64 4, i8* %.atomicdst.i.i.i.i.i.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast)
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* %.atomicdst.i.i.i.i.i.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast)
br label %_ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i.i
if.else.i.i.i.i: ; preds = %if.then.i.i.i45
@@ -355,9 +355,9 @@ if.then4.i.i.i: ; preds = %_ZN9__gnu_cxxL27__e
br label %_ZNSsD1Ev.exit
_ZNSsD1Ev.exit: ; preds = %_ZN4llvm12SMDiagnosticaSEOS0_.exit, %_ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i.i, %if.then4.i.i.i
- call void @llvm.lifetime.end(i64 1, i8* %26) #3
+ call void @llvm.lifetime.end.p0i8(i64 1, i8* %26) #3
%31 = getelementptr inbounds %"class.std::allocator", %"class.std::allocator"* %ref.tmp.i.i47, i64 0, i32 0
- call void @llvm.lifetime.start(i64 1, i8* %31) #3
+ call void @llvm.lifetime.start.p0i8(i64 1, i8* %31) #3
%_M_p.i.i.i.i48 = getelementptr inbounds %"class.std::basic_string", %"class.std::basic_string"* %ref.tmp5, i64 0, i32 0, i32 0
%32 = load i8*, i8** %_M_p.i.i.i.i48, align 8, !tbaa !1
%arrayidx.i.i.i49 = getelementptr inbounds i8, i8* %32, i64 -24
@@ -372,11 +372,11 @@ if.then.i.i.i52: ; preds = %_ZNSsD1Ev.exit
if.then.i.i.i.i55: ; preds = %if.then.i.i.i52
%.atomicdst.i.i.i.i.i46.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast = bitcast i32* %.atomicdst.i.i.i.i.i46 to i8*
- call void @llvm.lifetime.start(i64 4, i8* %.atomicdst.i.i.i.i.i46.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast)
+ call void @llvm.lifetime.start.p0i8(i64 4, i8* %.atomicdst.i.i.i.i.i46.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast)
%35 = atomicrmw volatile add i32* %34, i32 -1 acq_rel
store i32 %35, i32* %.atomicdst.i.i.i.i.i46, align 4
%.atomicdst.i.i.i.i.i46.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..atomicdst.0..atomicdst.0..i.i.i.i.i54 = load volatile i32, i32* %.atomicdst.i.i.i.i.i46, align 4
- call void @llvm.lifetime.end(i64 4, i8* %.atomicdst.i.i.i.i.i46.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast)
+ call void @llvm.lifetime.end.p0i8(i64 4, i8* %.atomicdst.i.i.i.i.i46.0..atomicdst.i.i.i.i.0..atomicdst.i.i.i.0..atomicdst.i.i.0..atomicdst.i.0..sroa_cast)
br label %_ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i.i60
if.else.i.i.i.i57: ; preds = %if.then.i.i.i52
@@ -395,7 +395,7 @@ if.then4.i.i.i61: ; preds = %_ZN9__gnu_cxxL27__e
br label %_ZNSsD1Ev.exit62
_ZNSsD1Ev.exit62: ; preds = %_ZNSsD1Ev.exit, %_ZN9__gnu_cxxL27__exchange_and_add_dispatchEPii.exit.i.i.i60, %if.then4.i.i.i61
- call void @llvm.lifetime.end(i64 1, i8* %31) #3
+ call void @llvm.lifetime.end.p0i8(i64 1, i8* %31) #3
br label %cleanup
cond.false.i.i: ; preds = %_ZNK4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEE8getErrorEv.exit
@@ -438,10 +438,10 @@ _ZN4llvm7ErrorOrISt10unique_ptrINS_12MemoryBufferESt14default_deleteIS2_EEED2Ev.
}
; Function Attrs: nounwind
-declare void @llvm.lifetime.start(i64, i8* nocapture) #3
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #3
; Function Attrs: nounwind
-declare void @llvm.lifetime.end(i64, i8* nocapture) #3
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #3
; Function Attrs: noreturn nounwind
declare void @__assert_fail(i8*, i8*, i32 zeroext, i8*) #4
diff --git a/test/CodeGen/PowerPC/vec_absd.ll b/test/CodeGen/PowerPC/vec_absd.ll
index 37a3a5c94a33..268587bb2eaf 100644
--- a/test/CodeGen/PowerPC/vec_absd.ll
+++ b/test/CodeGen/PowerPC/vec_absd.ll
@@ -18,7 +18,7 @@ entry:
ret <16 x i8> %res
; CHECK-LABEL: @test_byte
; CHECK: vabsdub 2, 2, 3
-; CHECK blr
+; CHECK: blr
}
define <8 x i16> @test_half(<8 x i16> %a, <8 x i16> %b) {
@@ -27,7 +27,7 @@ entry:
ret <8 x i16> %res
; CHECK-LABEL: @test_half
; CHECK: vabsduh 2, 2, 3
-; CHECK blr
+; CHECK: blr
}
define <4 x i32> @test_word(<4 x i32> %a, <4 x i32> %b) {
diff --git a/test/CodeGen/PowerPC/vec_cmp.ll b/test/CodeGen/PowerPC/vec_cmp.ll
index 0eaac554aa4d..88de9a17d91e 100644
--- a/test/CodeGen/PowerPC/vec_cmp.ll
+++ b/test/CodeGen/PowerPC/vec_cmp.ll
@@ -54,7 +54,7 @@ entry:
}
; CHECK-LABEL: v16si8_cmp_ne:
; CHECK: vcmpequb [[RET:[0-9]+]], 2, 3
-; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
+; CHECK-NEXT: vnot 2, [[RET]]
define <16 x i8> @v16si8_cmp_le(<16 x i8> %x, <16 x i8> %y) nounwind readnone {
entry:
@@ -64,7 +64,7 @@ entry:
}
; CHECK-LABEL: v16si8_cmp_le:
; CHECK: vcmpgtsb [[RET:[0-9]+]], 2, 3
-; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
+; CHECK-NEXT: vnot 2, [[RET]]
define <16 x i8> @v16ui8_cmp_le(<16 x i8> %x, <16 x i8> %y) nounwind readnone {
entry:
@@ -74,7 +74,7 @@ entry:
}
; CHECK-LABEL: v16ui8_cmp_le:
; CHECK: vcmpgtub [[RET:[0-9]+]], 2, 3
-; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
+; CHECK-NEXT: vnot 2, [[RET]]
define <16 x i8> @v16si8_cmp_lt(<16 x i8> %x, <16 x i8> %y) nounwind readnone {
entry:
@@ -120,7 +120,7 @@ entry:
}
; CHECK-LABEL: v16si8_cmp_ge:
; CHECK: vcmpgtsb [[RET:[0-9]+]], 3, 2
-; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
+; CHECK-NEXT: vnot 2, [[RET]]
define <16 x i8> @v16ui8_cmp_ge(<16 x i8> %x, <16 x i8> %y) nounwind readnone {
entry:
@@ -130,7 +130,7 @@ entry:
}
; CHECK-LABEL: v16ui8_cmp_ge:
; CHECK: vcmpgtub [[RET:[0-9]+]], 3, 2
-; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
+; CHECK-NEXT: vnot 2, [[RET]]
define <32 x i8> @v32si8_cmp(<32 x i8> %x, <32 x i8> %y) nounwind readnone {
@@ -180,7 +180,7 @@ entry:
}
; CHECK-LABEL: v8si16_cmp_ne:
; CHECK: vcmpequh [[RET:[0-9]+]], 2, 3
-; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
+; CHECK-NEXT: vnot 2, [[RET]]
define <8 x i16> @v8si16_cmp_le(<8 x i16> %x, <8 x i16> %y) nounwind readnone {
entry:
@@ -190,7 +190,7 @@ entry:
}
; CHECK-LABEL: v8si16_cmp_le:
; CHECK: vcmpgtsh [[RET:[0-9]+]], 2, 3
-; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
+; CHECK-NEXT: vnot 2, [[RET]]
define <8 x i16> @v8ui16_cmp_le(<8 x i16> %x, <8 x i16> %y) nounwind readnone {
entry:
@@ -200,7 +200,7 @@ entry:
}
; CHECK-LABEL: v8ui16_cmp_le:
; CHECK: vcmpgtuh [[RET:[0-9]+]], 2, 3
-; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
+; CHECK-NEXT: vnot 2, [[RET]]
define <8 x i16> @v8si16_cmp_lt(<8 x i16> %x, <8 x i16> %y) nounwind readnone {
entry:
@@ -246,7 +246,7 @@ entry:
}
; CHECK-LABEL: v8si16_cmp_ge:
; CHECK: vcmpgtsh [[RET:[0-9]+]], 3, 2
-; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
+; CHECK-NEXT: vnot 2, [[RET]]
define <8 x i16> @v8ui16_cmp_ge(<8 x i16> %x, <8 x i16> %y) nounwind readnone {
entry:
@@ -256,7 +256,7 @@ entry:
}
; CHECK-LABEL: v8ui16_cmp_ge:
; CHECK: vcmpgtuh [[RET:[0-9]+]], 3, 2
-; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
+; CHECK-NEXT: vnot 2, [[RET]]
define <16 x i16> @v16si16_cmp(<16 x i16> %x, <16 x i16> %y) nounwind readnone {
@@ -309,7 +309,7 @@ entry:
}
; CHECK-LABEL: v4si32_cmp_ne:
; CHECK: vcmpequw [[RCMP:[0-9]+]], 2, 3
-; CHECK-NEXT: vnor 2, [[RCMP]], [[RCMP]]
+; CHECK-NEXT: vnot 2, [[RCMP]]
define <4 x i32> @v4si32_cmp_le(<4 x i32> %x, <4 x i32> %y) nounwind readnone {
entry:
@@ -319,7 +319,7 @@ entry:
}
; CHECK-LABEL: v4si32_cmp_le:
; CHECK: vcmpgtsw [[RET:[0-9]+]], 2, 3
-; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
+; CHECK-NEXT: vnot 2, [[RET]]
define <4 x i32> @v4ui32_cmp_le(<4 x i32> %x, <4 x i32> %y) nounwind readnone {
entry:
@@ -329,7 +329,7 @@ entry:
}
; CHECK-LABEL: v4ui32_cmp_le:
; CHECK: vcmpgtuw [[RET:[0-9]+]], 2, 3
-; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
+; CHECK-NEXT: vnot 2, [[RET]]
define <4 x i32> @v4si32_cmp_lt(<4 x i32> %x, <4 x i32> %y) nounwind readnone {
entry:
@@ -375,7 +375,7 @@ entry:
}
; CHECK-LABEL: v4si32_cmp_ge:
; CHECK: vcmpgtsw [[RET:[0-9]+]], 3, 2
-; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
+; CHECK-NEXT: vnot 2, [[RET]]
define <4 x i32> @v4ui32_cmp_ge(<4 x i32> %x, <4 x i32> %y) nounwind readnone {
entry:
@@ -385,7 +385,7 @@ entry:
}
; CHECK-LABEL: v4ui32_cmp_ge:
; CHECK: vcmpgtuw [[RET:[0-9]+]], 3, 2
-; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
+; CHECK-NEXT: vnot 2, [[RET]]
define <8 x i32> @v8si32_cmp(<8 x i32> %x, <8 x i32> %y) nounwind readnone {
@@ -458,7 +458,7 @@ entry:
}
; CHECK-LABEL: v4f32_cmp_ne:
; CHECK: vcmpeqfp [[RET:[0-9]+]], 2, 3
-; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
+; CHECK-NEXT: vnot 2, [[RET]]
define <4 x float> @v4f32_cmp_le(<4 x float> %x, <4 x float> %y) nounwind readnone {
entry:
@@ -509,7 +509,7 @@ entry:
}
; CHECK-LABEL: v4f32_cmp_ule:
; CHECK: vcmpgtfp [[RET:[0-9]+]], 2, 3
-; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
+; CHECK-NEXT: vnot 2, [[RET]]
define <4 x float> @v4f32_cmp_ult(<4 x float> %x, <4 x float> %y) nounwind readnone {
entry:
@@ -520,7 +520,7 @@ entry:
}
; CHECK-LABEL: v4f32_cmp_ult:
; CHECK: vcmpgefp [[RET:[0-9]+]], 2, 3
-; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
+; CHECK-NEXT: vnot 2, [[RET]]
define <4 x float> @v4f32_cmp_uge(<4 x float> %x, <4 x float> %y) nounwind readnone {
entry:
@@ -531,7 +531,7 @@ entry:
}
; CHECK-LABEL: v4f32_cmp_uge:
; CHECK: vcmpgtfp [[RET:[0-9]+]], 3, 2
-; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
+; CHECK-NEXT: vnot 2, [[RET]]
define <4 x float> @v4f32_cmp_ugt(<4 x float> %x, <4 x float> %y) nounwind readnone {
entry:
@@ -542,7 +542,7 @@ entry:
}
; CHECK-LABEL: v4f32_cmp_ugt:
; CHECK: vcmpgefp [[RET:[0-9]+]], 3, 2
-; CHECK-NEXT: vnor 2, [[RET]], [[RET]]
+; CHECK-NEXT: vnot 2, [[RET]]
define <8 x float> @v8f32_cmp(<8 x float> %x, <8 x float> %y) nounwind readnone {
diff --git a/test/CodeGen/PowerPC/vsx-args.ll b/test/CodeGen/PowerPC/vsx-args.ll
index 252f9b360b96..7fa31aea84ba 100644
--- a/test/CodeGen/PowerPC/vsx-args.ll
+++ b/test/CodeGen/PowerPC/vsx-args.ll
@@ -13,10 +13,10 @@ entry:
ret <2 x double> %v
; CHECK-LABEL: @main
-; CHECK-DAG: vor [[V:[0-9]+]], 2, 2
-; CHECK-DAG: vor 2, 3, 3
-; CHECK-DAG: vor 3, 4, 4
-; CHECK-DAG: vor 4, [[V]], [[V]]
+; CHECK-DAG: vmr [[V:[0-9]+]], 2
+; CHECK-DAG: vmr 2, 3
+; CHECK-DAG: vmr 3, 4
+; CHECK-DAG: vmr 4, [[V]]
; CHECK: bl sv
; CHECK: lxvd2x [[VC:[0-9]+]],
; CHECK: xvadddp 34, 34, [[VC]]
@@ -24,8 +24,8 @@ entry:
; CHECK-FISL-LABEL: @main
; CHECK-FISL: stxvd2x 34
-; CHECK-FISL: vor 2, 3, 3
-; CHECK-FISL: vor 3, 4, 4
+; CHECK-FISL: vmr 2, 3
+; CHECK-FISL: vmr 3, 4
; CHECK-FISL: lxvd2x 36
; CHECK-FISL: bl sv
; CHECK-FISL: lxvd2x [[VC:[0-9]+]],
diff --git a/test/CodeGen/PowerPC/vsx-infl-copy1.ll b/test/CodeGen/PowerPC/vsx-infl-copy1.ll
index 592f85e2bcaf..1d6718279a0d 100644
--- a/test/CodeGen/PowerPC/vsx-infl-copy1.ll
+++ b/test/CodeGen/PowerPC/vsx-infl-copy1.ll
@@ -11,15 +11,15 @@ entry:
br label %vector.body
; CHECK-LABEL: @_Z8example9Pj
-; CHECK: vor
-; CHECK: vor
-; CHECK: vor
-; CHECK: vor
-; CHECK: vor
-; CHECK: vor
-; CHECK: vor
-; CHECK: vor
-; CHECK: vor
+; CHECK: vmr
+; CHECK: vmr
+; CHECK: vmr
+; CHECK: vmr
+; CHECK: vmr
+; CHECK: vmr
+; CHECK: vmr
+; CHECK: vmr
+; CHECK: vmr
vector.body: ; preds = %vector.body, %entry
%index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
diff --git a/test/CodeGen/PowerPC/vsx-p9.ll b/test/CodeGen/PowerPC/vsx-p9.ll
index e8a0a3bcf92a..ba359501ccc5 100644
--- a/test/CodeGen/PowerPC/vsx-p9.ll
+++ b/test/CodeGen/PowerPC/vsx-p9.ll
@@ -277,8 +277,8 @@ entry:
%0 = tail call <2 x i64> @llvm.ppc.vsx.xvxexpdp(<2 x double> %a)
ret <2 x i64> %0
; CHECK-LABEL: testXVXEXPDP
-; CHECK xvxexpdp 34, 34
-; CHECK blr
+; CHECK: xvxexpdp 34, 34
+; CHECK: blr
}
; Function Attrs: nounwind readnone
declare <2 x i64>@llvm.ppc.vsx.xvxexpdp(<2 x double>)
@@ -289,8 +289,8 @@ entry:
%0 = tail call <4 x i32> @llvm.ppc.vsx.xvxsigsp(<4 x float> %a)
ret <4 x i32> %0
; CHECK-LABEL: testXVXSIGSP
-; CHECK xvxsigsp 34, 34
-; CHECK blr
+; CHECK: xvxsigsp 34, 34
+; CHECK: blr
}
; Function Attrs: nounwind readnone
declare <4 x i32> @llvm.ppc.vsx.xvxsigsp(<4 x float>)
@@ -301,8 +301,8 @@ entry:
%0 = tail call <2 x i64> @llvm.ppc.vsx.xvxsigdp(<2 x double> %a)
ret <2 x i64> %0
; CHECK-LABEL: testXVXSIGDP
-; CHECK xvxsigdp 34, 34
-; CHECK blr
+; CHECK: xvxsigdp 34, 34
+; CHECK: blr
}
; Function Attrs: nounwind readnone
declare <2 x i64> @llvm.ppc.vsx.xvxsigdp(<2 x double>)